
Re: [PATCH v1 1/3] xen/riscv: introduce setup_initial_pages


  • To: Oleksii Kurochko <oleksii.kurochko@xxxxxxxxx>
  • From: Jan Beulich <jbeulich@xxxxxxxx>
  • Date: Mon, 27 Feb 2023 16:12:05 +0100
  • Cc: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>, Stefano Stabellini <sstabellini@xxxxxxxxxx>, Gianluca Guida <gianluca@xxxxxxxxxxxx>, Bob Eshleman <bobbyeshleman@xxxxxxxxx>, Alistair Francis <alistair.francis@xxxxxxx>, Connor Davis <connojdavis@xxxxxxxxx>, xen-devel@xxxxxxxxxxxxxxxxxxxx
  • Delivery-date: Mon, 27 Feb 2023 15:12:44 +0000
  • List-id: Xen developer discussion <xen-devel.lists.xenproject.org>

On 24.02.2023 16:06, Oleksii Kurochko wrote:
> --- /dev/null
> +++ b/xen/arch/riscv/include/asm/page.h
> @@ -0,0 +1,90 @@
> +#ifndef _ASM_RISCV_PAGE_H
> +#define _ASM_RISCV_PAGE_H
> +
> +#include <xen/const.h>
> +#include <xen/types.h>
> +
> +#define PAGE_ENTRIES            512
> +#define VPN_BITS                (9)
> +#define VPN_MASK                ((unsigned long)((1 << VPN_BITS) - 1))
> +
> +#ifdef CONFIG_RISCV_64
> +/* L3 index Bit[47:39] */
> +#define THIRD_SHIFT             (39)
> +#define THIRD_MASK              (VPN_MASK << THIRD_SHIFT)
> +/* L2 index Bit[38:30] */
> +#define SECOND_SHIFT            (30)
> +#define SECOND_MASK             (VPN_MASK << SECOND_SHIFT)
> +/* L1 index Bit[29:21] */
> +#define FIRST_SHIFT             (21)
> +#define FIRST_MASK              (VPN_MASK << FIRST_SHIFT)
> +/* L0 index Bit[20:12] */
> +#define ZEROETH_SHIFT           (12)
> +#define ZEROETH_MASK            (VPN_MASK << ZEROETH_SHIFT)
> +
> +#else // CONFIG_RISCV_32
> +
> +/* L1 index Bit[31:22] */
> +#define FIRST_SHIFT             (22)
> +#define FIRST_MASK              (VPN_MASK << FIRST_SHIFT)
> +
> +/* L0 index Bit[21:12] */
> +#define ZEROETH_SHIFT           (12)
> +#define ZEROETH_MASK            (VPN_MASK << ZEROETH_SHIFT)
> +#endif
> +
> +#define THIRD_SIZE              (1 << THIRD_SHIFT)
> +#define THIRD_MAP_MASK          (~(THIRD_SIZE - 1))
> +#define SECOND_SIZE             (1 << SECOND_SHIFT)
> +#define SECOND_MAP_MASK         (~(SECOND_SIZE - 1))
> +#define FIRST_SIZE              (1 << FIRST_SHIFT)
> +#define FIRST_MAP_MASK          (~(FIRST_SIZE - 1))
> +#define ZEROETH_SIZE            (1 << ZEROETH_SHIFT)
> +#define ZEROETH_MAP_MASK        (~(ZEROETH_SIZE - 1))
> +
> +#define PTE_SHIFT               10
> +
> +#define PTE_VALID               BIT(0, UL)
> +#define PTE_READABLE            BIT(1, UL)
> +#define PTE_WRITABLE            BIT(2, UL)
> +#define PTE_EXECUTABLE          BIT(3, UL)
> +#define PTE_USER                BIT(4, UL)
> +#define PTE_GLOBAL              BIT(5, UL)
> +#define PTE_ACCESSED            BIT(6, UL)
> +#define PTE_DIRTY               BIT(7, UL)
> +#define PTE_RSW                 (BIT(8, UL) | BIT(9, UL))
> +
> +#define PTE_LEAF_DEFAULT        (PTE_VALID | PTE_READABLE | PTE_WRITABLE | PTE_EXECUTABLE)
> +#define PTE_TABLE               (PTE_VALID)
> +
> +/* Calculate the offsets into the pagetables for a given VA */
> +#define zeroeth_linear_offset(va)   ((va) >> ZEROETH_SHIFT)
> +#define first_linear_offset(va)     ((va) >> FIRST_SHIFT)
> +#define second_linear_offset(va)    ((va) >> SECOND_SHIFT)
> +#define third_linear_offset(va)     ((va) >> THIRD_SHIFT)
> +
> +#define pagetable_zeroeth_index(va) zeroeth_linear_offset((va) & ZEROETH_MASK)
> +#define pagetable_first_index(va)   first_linear_offset((va) & FIRST_MASK)
> +#define pagetable_second_index(va)  second_linear_offset((va) & SECOND_MASK)
> +#define pagetable_third_index(va)   third_linear_offset((va) & THIRD_MASK)
> +
> +/* Page Table entry */
> +typedef struct {
> +    uint64_t pte;
> +} pte_t;
> +
> +/* Shift the VPN[x] or PPN[x] fields of a virtual or physical address
> + * to become the shifted PPN[x] fields of a page table entry */
> +#define addr_to_ppn(x) (((x) >> PAGE_SHIFT) << PTE_SHIFT)
> +
> +static inline pte_t paddr_to_pte(unsigned long paddr)
> +{
> +    return (pte_t) { .pte = addr_to_ppn(paddr) };
> +}
> +
> +static inline bool pte_is_valid(pte_t *p)

Btw - const whenever possible please, especially in such basic helpers.
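
For example (a sketch only - the helper's body isn't visible in the quoted
context, so the check shown here is merely what I'd expect it to be):

static inline bool pte_is_valid(const pte_t *p)
{
    /* A PTE is valid iff the V bit is set. */
    return p->pte & PTE_VALID;
}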

> --- /dev/null
> +++ b/xen/arch/riscv/mm.c
> @@ -0,0 +1,223 @@
> +#include <xen/init.h>
> +#include <xen/lib.h>
> +
> +#include <asm/csr.h>
> +#include <asm/mm.h>
> +#include <asm/page.h>
> +
> +/*
> + * xen_second_pagetable is indexed with the VPN[2] page table entry field
> + * xen_first_pagetable is accessed from the VPN[1] page table entry field
> + * xen_zeroeth_pagetable is accessed from the VPN[0] page table entry field
> + */
> +pte_t xen_second_pagetable[PAGE_ENTRIES] __attribute__((__aligned__(PAGE_SIZE)));

static?

> +static pte_t xen_first_pagetable[PAGE_ENTRIES]
> +    __attribute__((__aligned__(PAGE_SIZE)));
> +static pte_t xen_zeroeth_pagetable[PAGE_ENTRIES]
> +    __attribute__((__aligned__(PAGE_SIZE)));

Please use __aligned() instead of open-coding it. You also may want to
specify the section here explicitly, as .bss.page_aligned (as we do
elsewhere).
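
Something along these lines, for illustration (a sketch only, using the
__aligned() and __section() wrappers; whether all three arrays can really
become static depends on the rest of the series):

/* Boot-time page tables, zero-filled like the rest of .bss. */
static pte_t xen_second_pagetable[PAGE_ENTRIES]
    __section(".bss.page_aligned") __aligned(PAGE_SIZE);
static pte_t xen_first_pagetable[PAGE_ENTRIES]
    __section(".bss.page_aligned") __aligned(PAGE_SIZE);
static pte_t xen_zeroeth_pagetable[PAGE_ENTRIES]
    __section(".bss.page_aligned") __aligned(PAGE_SIZE);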

> +extern unsigned long _stext;
> +extern unsigned long _etext;
> +extern unsigned long __init_begin;
> +extern unsigned long __init_end;
> +extern unsigned long _srodata;
> +extern unsigned long _erodata;

Please use kernel.h and drop the then-colliding declarations. For what's
left, please use array types, as suggested elsewhere already.
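
I.e. something like this (a sketch; to my understanding kernel.h already
carries array-type declarations for _start/_end/_stext/_etext and friends,
so only symbols it doesn't cover would need declaring here):

/* Array types avoid accidentally dereferencing the symbol's "value". */
extern char __init_begin[], __init_end[];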

> +paddr_t phys_offset;
> +
> +#define resolve_early_addr(x) \
> +    ({                                                                          \
> +         unsigned long * __##x;                                                 \
> +        if ( load_addr_start <= x && x < load_addr_end )                        \

Nit: Mismatched indentation.

> +            __##x = (unsigned long *)x;                                         \
> +        else                                                                    \
> +            __##x = (unsigned long *)(x + load_addr_start - linker_addr_start); \
> +        __##x;                                                                  \
> +     })
> +
> +static void __init clear_pagetables(unsigned long load_addr_start,
> +                             unsigned long load_addr_end,
> +                             unsigned long linker_addr_start,
> +                             unsigned long linker_addr_end)

Nit (style): Indentation.

> +{
> +    unsigned long *p;
> +    unsigned long page;
> +    unsigned long i;
> +
> +    page = (unsigned long)&xen_second_pagetable[0];
> +
> +    p = resolve_early_addr(page);
> +    for ( i = 0; i < ARRAY_SIZE(xen_second_pagetable); i++ )
> +    {
> +        p[i] = 0ULL;
> +    }

We typically omit braces around single-statement bodies. Here,
though: Why do you do this in the first place? These static arrays
all start out zero-initialized anyway (from when you clear .bss).
Plus even if they didn't - why not memset()?
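
Even if clearing was really needed for some reason, something like the
following would do (a sketch, re-using the patch's own resolve_early_addr()
and the existing "page" local variable):

    page = (unsigned long)&xen_second_pagetable[0];
    /* Zero the whole table in one go instead of an element-wise loop. */
    memset(resolve_early_addr(page), 0, sizeof(xen_second_pagetable));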

> +    page = (unsigned long)&xen_first_pagetable[0];
> +    p = resolve_early_addr(page);
> +    for ( i = 0; i < ARRAY_SIZE(xen_first_pagetable); i++ )
> +    {
> +        p[i] = 0ULL;
> +    }
> +
> +    page = (unsigned long)&xen_zeroeth_pagetable[0];
> +    p = resolve_early_addr(page);
> +    for ( i = 0; i < ARRAY_SIZE(xen_zeroeth_pagetable); i++ )
> +    {
> +        p[i] = 0ULL;
> +    }
> +}
> +
> +/*
> + * WARNING: load_addr() and linker_addr() are to be called only when the MMU is
> + * disabled and only when executed by the primary CPU.  They cannot refer to
> + * any global variable or functions.
> + */
> +
> +/*
> + * Convert an addressed layed out at link time to the address where it was loaded
> + * by the bootloader.
> + */
> +#define load_addr(linker_address)                                              \
> +    ({                                                                         \
> +        unsigned long __linker_address = (unsigned long)(linker_address);      \
> +        if ( linker_addr_start <= __linker_address &&                          \
> +            __linker_address < linker_addr_end )                               \
> +        {                                                                      \
> +            __linker_address =                                                 \
> +                __linker_address - linker_addr_start + load_addr_start;        \
> +        }                                                                      \
> +        __linker_address;                                                      \
> +    })
> +
> +/* Convert boot-time Xen address from where it was loaded by the boot loader to the address it was layed out
> + * at link-time.
> + */
> +#define linker_addr(load_address)                                              \
> +    ({                                                                         \
> +        unsigned long __load_address = (unsigned long)(load_address);          \
> +        if ( load_addr_start <= __load_address &&                              \
> +            __load_address < load_addr_end )                                   \
> +        {                                                                      \
> +            __load_address =                                                   \
> +                __load_address - load_addr_start + linker_addr_start;          \
> +        }                                                                      \
> +        __load_address;                                                        \
> +    })
> +
> +static void __attribute__((section(".entry")))
> +_setup_initial_pagetables(pte_t *second, pte_t *first, pte_t *zeroeth,

Why the special section (also again further down)?

Jan



 

