Re: [Minios-devel] [PATCH v3 31/43] arm64: implement the mmap/munmap
- To: Huang Shijie <shijie.huang@xxxxxxx>, wei.liu2@xxxxxxxxxx
- From: Julien Grall <julien.grall@xxxxxxx>
- Date: Mon, 30 Apr 2018 18:36:56 +0100
- Cc: jgross@xxxxxxxx, wei.chen@xxxxxxx, steve.capper@xxxxxxx, vlad.babchuk@xxxxxxxxx, minios-devel@xxxxxxxxxxxxxxxxxxxx, kaly.xin@xxxxxxx, samuel.thibault@xxxxxxxxxxxx, baozich@xxxxxxxxx, nd@xxxxxxx
- Delivery-date: Mon, 30 Apr 2018 17:37:03 +0000
- List-id: Mini-os development list <minios-devel.lists.xenproject.org>
Hi Shijie,
On 16/04/18 07:32, Huang Shijie wrote:
This patch implements mmap/munmap by adding:
map_frames_ex()/unmap_frames()/map_zero()
flush_tlb_page() is used to invalidate a page.
Signed-off-by: Huang Shijie <shijie.huang@xxxxxxx>
---
arch/arm/mm.c | 70 ++++++++++++++++++++++++++++++++++++++++++++++++++
include/arm/arch_mm.h | 4 +++
include/arm/arm64/os.h | 16 ++++++++++++
3 files changed, 90 insertions(+)
diff --git a/arch/arm/mm.c b/arch/arm/mm.c
index b1c192a..eb5904f 100644
--- a/arch/arm/mm.c
+++ b/arch/arm/mm.c
@@ -319,6 +319,76 @@ void *ioremap(paddr_t paddr, unsigned long size)
return (void*)addr;
}
+void *map_frames_ex(const unsigned long *f, unsigned long n, unsigned long stride,
+                    unsigned long increment, unsigned long alignment, domid_t id,
+                    int *err, unsigned long prot_origin)
+{
+ unsigned long addr, va;
+ unsigned long done = 0;
+ unsigned long mfn;
+ unsigned long prot = MEM_DEF_ATTR;
+ int ret;
+
+ if (!f)
+ return NULL;
+
+ addr = allocate_ondemand(n, alignment);
+ if (!addr)
+ return NULL;
+
+ va = addr;
+ if (prot_origin == MEM_RO_ATTR)
+ prot = prot_origin;
+
+ while (done < n) {
+ mfn = f[done * stride] + done * increment;
+ ret = build_pagetable(va, mfn, 1, prot, alloc_new_page, 3);
+ if (ret)
+ return NULL;
+ done++;
+ va += PAGE_SIZE;
+ }
+
+ return (void *)addr;
+}
+
+static lpae_t *get_ptep(unsigned long vaddr)
+{
+ lpae_t *pgd, *pud, *pmd, *pte;
+
+ pgd = &boot_l0_pgtable[l0_pgt_idx(vaddr)];
+ ASSERT((*pgd) != L0_INVAL);
ASSERT() should only be used when you know something should always be true.
I don't think you can assume that a caller of get_ptep() will always pass
an address with a valid entry. Also, this will prevent unmap_frames()
from handling NULL pointers (the x86 version does handle them).
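Something along these lines (untested sketch, reusing the helpers from
this patch) would let get_ptep() report a hole instead of asserting:

static lpae_t *get_ptep(unsigned long vaddr)
{
    lpae_t *pgd, *pud, *pmd;

    pgd = &boot_l0_pgtable[l0_pgt_idx(vaddr)];
    if ( *pgd == L0_INVAL )
        return NULL;

    pud = (lpae_t *)to_virt((*pgd) & ~ATTR_MASK_L) + l1_pgt_idx(vaddr);
    if ( *pud == L0_INVAL )
        return NULL;

    pmd = (lpae_t *)to_virt((*pud) & ~ATTR_MASK_L) + l2_pgt_idx(vaddr);
    if ( *pmd == L0_INVAL )
        return NULL;

    return (lpae_t *)to_virt((*pmd) & ~ATTR_MASK_L) + l3_pgt_idx(vaddr);
}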
+
+ pud = (lpae_t *)to_virt((*pgd) & ~ATTR_MASK_L) + l1_pgt_idx(vaddr);
+ ASSERT((*pud) != L0_INVAL);
+
+ pmd = (lpae_t *)to_virt((*pud) & ~ATTR_MASK_L) + l2_pgt_idx(vaddr);
+ ASSERT((*pmd) != L0_INVAL);
+
+ pte = (lpae_t *)to_virt((*pmd) & ~ATTR_MASK_L) + l3_pgt_idx(vaddr);
+ ASSERT((*pte) != L0_INVAL);
+
+ return pte;
+}
+
+int unmap_frames(unsigned long va, unsigned long num_frames)
+{
+ lpae_t *pte;
+
+ ASSERT(!((unsigned long)va & ~PAGE_MASK));
+
+ while (num_frames) {
while ( ... )
{
+ pte = get_ptep(va);
if ( pte )
+ *pte = (lpae_t)0;
Hard tab.
Also, I am pretty sure you want to use set_pgt_entry() here.
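I.e. something like this (sketch only; assuming the set_pgt_entry()
helper from earlier in this series takes a pointer and a value, and
that get_ptep() returns NULL for unmapped addresses as suggested above):

    while ( num_frames )
    {
        pte = get_ptep(va);
        if ( pte )
        {
            /* Clear the entry with the proper barriers, then flush. */
            set_pgt_entry(pte, (lpae_t)0);
            flush_tlb_page(va);
        }

        va += PAGE_SIZE;
        num_frames--;
    }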
+
+ flush_tlb_page(va);
+
+ va += PAGE_SIZE;
+ num_frames--;
+ }
+ return 0;
+}
+
void arch_init_mm(unsigned long *start_pfn_p, unsigned long *max_pfn_p)
{
int memory;
diff --git a/include/arm/arch_mm.h b/include/arm/arch_mm.h
index ad122e7..09a19c0 100644
--- a/include/arm/arch_mm.h
+++ b/include/arm/arch_mm.h
@@ -42,4 +42,8 @@ void arch_mm_preinit(void *dtb_pointer);
#define map_frames(f, n) (NULL)
void *ioremap(paddr_t addr, unsigned long size);
+
+extern unsigned long mfn_zero;
+#define map_zero(n, a) map_frames_ex(&mfn_zero, n, 0, 0, a, DOMID_SELF, NULL, MEM_RO_ATTR)
I need some context here. What is this function used for? Where is
mfn_zero defined?
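With stride and increment both 0, every page would map the same
mfn_zero frame read-only, so I guess this is meant to back
anonymous/zero mappings. A hypothetical caller, assuming mfn_zero is a
pre-allocated zeroed frame:

    /* Map 4 read-only pages, all backed by the shared zero frame. */
    void *p = map_zero(4, PAGE_SIZE);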
+
#endif
diff --git a/include/arm/arm64/os.h b/include/arm/arm64/os.h
index 2d55023..4e777bf 100644
--- a/include/arm/arm64/os.h
+++ b/include/arm/arm64/os.h
@@ -27,6 +27,22 @@ static inline void local_irq_enable(void)
__asm__ __volatile__("mrs %0, daif":"=r"(x)::"memory"); \
}
+/* Flush a page in the inner-shareable domain */
+static inline void flush_tlb_page(unsigned long va)
+{
+ unsigned long xt;
+
+ /* xt[43:0] to save VA[55:12] */
+ xt = va >> 12;
va >> PAGE_SHIFT. But I am not sure why you only save bits up to 55;
AFAICT bits [63:56] are dropped. What will you do with them?
+
+ __asm__ __volatile__(
+ "dsb sy;"
+ "tlbi vale1is, %0;"
+ "dsb sy;"
+ "isb;"
+ ::"r"(xt): "memory");
It is probably quite heavy to call this from a loop, but we can
optimize afterwards.
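One option for later (hypothetical sketch, not part of this patch):
keep the per-page TLBI but hoist the barriers out of the loop, e.g. a
flush_tlb_range() helper:

static inline void flush_tlb_range(unsigned long va, unsigned long n)
{
    /* Make the page-table updates visible before invalidating. */
    __asm__ __volatile__("dsb sy" ::: "memory");

    for ( ; n; n--, va += PAGE_SIZE )
        __asm__ __volatile__("tlbi vale1is, %0"
                             :: "r"(va >> PAGE_SHIFT) : "memory");

    /* One barrier pair for the whole range. */
    __asm__ __volatile__("dsb sy; isb" ::: "memory");
}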
+}
+
#endif
/* The Callee-saved registers : x19 ~ x29 */
Cheers,
--
Julien Grall
_______________________________________________
Minios-devel mailing list
Minios-devel@xxxxxxxxxxxxxxxxxxxx
https://lists.xenproject.org/mailman/listinfo/minios-devel