Hopefully the last iteration of this series. The only change this time round is to the patch adding the zeroeth level pt accessors, to remove the ifdef and to update the comment. Everything else has been acked (thanks all). Ian.
Signed-off-by: Ian Campbell <ian.campbell@citrix.com> Acked-by: Stefano Stabellini <stefano.stabellini@eu.citrix.com> --- xen/include/asm-arm/config.h | 18 ++++++++++-------- 1 files changed, 10 insertions(+), 8 deletions(-) diff --git a/xen/include/asm-arm/config.h b/xen/include/asm-arm/config.h index e3cfaf1..7e5daa0 100644 --- a/xen/include/asm-arm/config.h +++ b/xen/include/asm-arm/config.h @@ -72,6 +72,8 @@ END(name) #endif +#include <xen/const.h> + /* * Memory layout: * 0 - 2M Unmapped @@ -91,14 +93,14 @@ * - in setup_pagetables() when relocating Xen. */ -#define XEN_VIRT_START mk_unsigned_long(0x00200000) -#define FIXMAP_ADDR(n) (mk_unsigned_long(0x00400000) + (n) * PAGE_SIZE) -#define BOOT_MISC_VIRT_START mk_unsigned_long(0x00600000) -#define FRAMETABLE_VIRT_START mk_unsigned_long(0x02000000) -#define VMAP_VIRT_START mk_unsigned_long(0x10000000) -#define XENHEAP_VIRT_START mk_unsigned_long(0x40000000) -#define DOMHEAP_VIRT_START mk_unsigned_long(0x80000000) -#define DOMHEAP_VIRT_END mk_unsigned_long(0xffffffff) +#define XEN_VIRT_START _AC(0x00200000,UL) +#define FIXMAP_ADDR(n) (_AC(0x00400000,UL) + (n) * PAGE_SIZE) +#define BOOT_MISC_VIRT_START _AC(0x00600000,UL) +#define FRAMETABLE_VIRT_START _AC(0x02000000,UL) +#define VMAP_VIRT_START _AC(0x10000000,UL) +#define XENHEAP_VIRT_START _AC(0x40000000,UL) +#define DOMHEAP_VIRT_START _AC(0x80000000,UL) +#define DOMHEAP_VIRT_END _AC(0xffffffff,UL) #define VMAP_VIRT_END XENHEAP_VIRT_START #define HYPERVISOR_VIRT_START XEN_VIRT_START -- 1.7.2.5
Ian Campbell
2013-Aug-08 12:15 UTC
[PATCH v3 02/10] xen: arm: Add zeroeth level page table macros and defines
Signed-off-by: Ian Campbell <ian.campbell@citrix.com> --- v3: Don''t ifdef rely on >>39 being invalid for 32-bit words Update comment. --- xen/include/asm-arm/page.h | 19 +++++++++++++++---- 1 files changed, 15 insertions(+), 4 deletions(-) diff --git a/xen/include/asm-arm/page.h b/xen/include/asm-arm/page.h index 41e9eff..93bb8c0 100644 --- a/xen/include/asm-arm/page.h +++ b/xen/include/asm-arm/page.h @@ -309,9 +309,15 @@ static inline int gva_to_ipa(vaddr_t va, paddr_t *paddr) #endif /* __ASSEMBLY__ */ -/* These numbers add up to a 39-bit input address space. The ARMv7-A - * architecture actually specifies a 40-bit input address space for the p2m, - * with an 8K (1024-entry) top-level table. */ +/* + * These numbers add up to a 48-bit input address space. + * + * On 32-bit the zeroeth level does not exist, therefore the total is + * 39-bits. The ARMv7-A architecture actually specifies a 40-bit input + * address space for the p2m, with an 8K (1024-entry) top-level table. + * However Xen only supports 16GB of RAM on 32-bit ARM systems and + * therefore 39-bits are sufficient. 
+ */ #define LPAE_SHIFT 9 #define LPAE_ENTRIES (1u << LPAE_SHIFT) @@ -326,8 +332,12 @@ static inline int gva_to_ipa(vaddr_t va, paddr_t *paddr) #define FIRST_SHIFT (SECOND_SHIFT + LPAE_SHIFT) #define FIRST_SIZE (1u << FIRST_SHIFT) #define FIRST_MASK (~(FIRST_SIZE - 1)) +#define ZEROETH_SHIFT (FIRST_SHIFT + LPAE_SHIFT) +#define ZEROETH_SIZE (1u << ZEROETH_SHIFT) +#define ZEROETH_MASK (~(ZEROETH_SIZE - 1)) /* Calculate the offsets into the pagetables for a given VA */ +#define zeroeth_linear_offset(va) ((va) >> ZEROETH_SHIFT) #define first_linear_offset(va) ((va) >> FIRST_SHIFT) #define second_linear_offset(va) ((va) >> SECOND_SHIFT) #define third_linear_offset(va) ((va) >> THIRD_SHIFT) @@ -336,8 +346,9 @@ static inline int gva_to_ipa(vaddr_t va, paddr_t *paddr) #define first_table_offset(va) TABLE_OFFSET(first_linear_offset(va)) #define second_table_offset(va) TABLE_OFFSET(second_linear_offset(va)) #define third_table_offset(va) TABLE_OFFSET(third_linear_offset(va)) +#define zeroeth_table_offset(va) TABLE_OFFSET(zeroeth_linear_offset(va)) -#define clear_page(page)memset((void *)(page), 0, PAGE_SIZE) +#define clear_page(page) memset((void *)(page), 0, PAGE_SIZE) #define PAGE_ALIGN(x) (((x) + PAGE_SIZE - 1) & PAGE_MASK) -- 1.7.2.5
Ian Campbell
2013-Aug-08 12:15 UTC
[PATCH v3 03/10] xen: arm: Rename page table "hint" field to slightly more descriptive "contig"
Signed-off-by: Ian Campbell <ian.campbell@citrix.com> Acked-by: Stefano Stabellini <stefano.stabellini@eu.citrix.com> --- xen/arch/arm/mm.c | 2 +- xen/include/asm-arm/page.h | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/xen/arch/arm/mm.c b/xen/arch/arm/mm.c index f301e65..a4d65d8 100644 --- a/xen/arch/arm/mm.c +++ b/xen/arch/arm/mm.c @@ -526,7 +526,7 @@ static void __init create_mappings(unsigned long virt, count = nr_mfns / LPAE_ENTRIES; p = xen_second + second_linear_offset(virt); pte = mfn_to_xen_entry(base_mfn); - pte.pt.hint = 1; /* These maps are in 16-entry contiguous chunks. */ + pte.pt.contig = 1; /* These maps are in 16-entry contiguous chunks. */ for ( i = 0; i < count; i++ ) { write_pte(p + i, pte); diff --git a/xen/include/asm-arm/page.h b/xen/include/asm-arm/page.h index 93bb8c0..3d0f8a9 100644 --- a/xen/include/asm-arm/page.h +++ b/xen/include/asm-arm/page.h @@ -115,7 +115,7 @@ typedef struct { /* These seven bits are only used in Block entries and are ignored * in Table entries. */ - unsigned long hint:1; /* In a block of 16 contiguous entries */ + unsigned long contig:1; /* In a block of 16 contiguous entries */ unsigned long pxn:1; /* Privileged-XN */ unsigned long xn:1; /* eXecute-Never */ unsigned long avail:4; /* Ignored by hardware */ @@ -150,7 +150,7 @@ typedef struct { /* These seven bits are only used in Block entries and are ignored * in Table entries. */ - unsigned long hint:1; /* In a block of 16 contiguous entries */ + unsigned long contig:1; /* In a block of 16 contiguous entries */ unsigned long sbz2:1; unsigned long xn:1; /* eXecute-Never */ unsigned long avail:4; /* Ignored by hardware */ -- 1.7.2.5
Allow it to work on contiguous second level tables mapping an arbitrary region of memory. Rename it to create_32mb_mappings. Signed-off-by: Ian Campbell <ian.campbell@citrix.com> Acked-by: Stefano Stabellini <stefano.stabellini@eu.citrix.com> --- xen/arch/arm/mm.c | 19 +++++++++++-------- 1 files changed, 11 insertions(+), 8 deletions(-) diff --git a/xen/arch/arm/mm.c b/xen/arch/arm/mm.c index a4d65d8..1ad8fe3 100644 --- a/xen/arch/arm/mm.c +++ b/xen/arch/arm/mm.c @@ -511,20 +511,23 @@ void __cpuinit mmu_init_secondary_cpu(void) } /* Create Xen''s mappings of memory. - * Base and virt must be 32MB aligned and size a multiple of 32MB. */ -static void __init create_mappings(unsigned long virt, - unsigned long base_mfn, - unsigned long nr_mfns) + * Base and virt must be 32MB aligned and size a multiple of 32MB. + * second must be a contiguous set of second level page tables + * covering the region starting at virt_offset. */ +static void __init create_32mb_mappings(lpae_t *second, + unsigned long virt_offset, + unsigned long base_mfn, + unsigned long nr_mfns) { unsigned long i, count; lpae_t pte, *p; - ASSERT(!((virt >> PAGE_SHIFT) % (16 * LPAE_ENTRIES))); + ASSERT(!((virt_offset >> PAGE_SHIFT) % (16 * LPAE_ENTRIES))); ASSERT(!(base_mfn % (16 * LPAE_ENTRIES))); ASSERT(!(nr_mfns % (16 * LPAE_ENTRIES))); count = nr_mfns / LPAE_ENTRIES; - p = xen_second + second_linear_offset(virt); + p = second + second_linear_offset(virt_offset); pte = mfn_to_xen_entry(base_mfn); pte.pt.contig = 1; /* These maps are in 16-entry contiguous chunks. */ for ( i = 0; i < count; i++ ) @@ -539,7 +542,7 @@ static void __init create_mappings(unsigned long virt, void __init setup_xenheap_mappings(unsigned long base_mfn, unsigned long nr_mfns) { - create_mappings(XENHEAP_VIRT_START, base_mfn, nr_mfns); + create_32mb_mappings(xen_second, XENHEAP_VIRT_START, base_mfn, nr_mfns); /* Record where the xenheap is, for translation routines. 
*/ xenheap_virt_end = XENHEAP_VIRT_START + nr_mfns * PAGE_SIZE; @@ -559,7 +562,7 @@ void __init setup_frametable_mappings(paddr_t ps, paddr_t pe) /* Round up to 32M boundary */ frametable_size = (frametable_size + 0x1ffffff) & ~0x1ffffff; base_mfn = alloc_boot_pages(frametable_size >> PAGE_SHIFT, 32<<(20-12)); - create_mappings(FRAMETABLE_VIRT_START, base_mfn, frametable_size >> PAGE_SHIFT); + create_32mb_mappings(xen_second, FRAMETABLE_VIRT_START, base_mfn, frametable_size >> PAGE_SHIFT); memset(&frame_table[0], 0, nr_pages * sizeof(struct page_info)); memset(&frame_table[nr_pages], -1, -- 1.7.2.5
Ian Campbell
2013-Aug-08 12:15 UTC
[PATCH v3 05/10] xen: arm: define a macro to get this CPUs page table root
In a future patch the 32- and 64-bit root page tables will differ. Signed-off-by: Ian Campbell <ian.campbell@citrix.com> Acked-by: Stefano Stabellini <stefano.stabellini@eu.citrix.com> --- xen/arch/arm/mm.c | 5 +++-- 1 files changed, 3 insertions(+), 2 deletions(-) diff --git a/xen/arch/arm/mm.c b/xen/arch/arm/mm.c index 1ad8fe3..7781202 100644 --- a/xen/arch/arm/mm.c +++ b/xen/arch/arm/mm.c @@ -60,6 +60,7 @@ lpae_t boot_first[LPAE_ENTRIES] __attribute__((__aligned__(4096))); /* Per-CPU pagetable pages */ /* xen_pgtable == root of the trie (zeroeth level on 64-bit, first on 32-bit) */ static DEFINE_PER_CPU(lpae_t *, xen_pgtable); +#define THIS_CPU_PGTABLE this_cpu(xen_pgtable) /* xen_dommap == pages used by map_domain_page, these pages contain * the second level pagetables which mapp the domheap region * DOMHEAP_VIRT_START...DOMHEAP_VIRT_END in 2MB chunks. */ static DEFINE_PER_CPU(lpae_t *, xen_dommap); @@ -147,7 +148,7 @@ done: void dump_hyp_walk(vaddr_t addr) { uint64_t ttbr = READ_SYSREG64(TTBR0_EL2); - lpae_t *pgtable = this_cpu(xen_pgtable); + lpae_t *pgtable = THIS_CPU_PGTABLE; printk("Walking Hypervisor VA 0x%"PRIvaddr" " "on CPU%d via TTBR 0x%016"PRIx64"\n", @@ -502,7 +503,7 @@ void __cpuinit mmu_init_secondary_cpu(void) uint64_t ttbr; /* Change to this CPU's pagetables */ - ttbr = (uintptr_t)virt_to_maddr(this_cpu(xen_pgtable)); + ttbr = (uintptr_t)virt_to_maddr(THIS_CPU_PGTABLE); WRITE_TTBR(ttbr); /* From now on, no mapping may be both writable and executable. */ -- 1.7.2.5
The open coded version is pretty ugly, not helped by rigid enforcement of an 80 character line length. Signed-off-by: Ian Campbell <ian.campbell@citrix.com> Acked-by: Stefano Stabellini <stefano.stabellini@eu.citrix.com> --- xen/arch/arm/mm.c | 13 +++++++++---- 1 files changed, 9 insertions(+), 4 deletions(-) diff --git a/xen/arch/arm/mm.c b/xen/arch/arm/mm.c index 7781202..974d6df 100644 --- a/xen/arch/arm/mm.c +++ b/xen/arch/arm/mm.c @@ -331,6 +331,13 @@ void __cpuinit setup_virt_paging(void) * write to TTBR0 has completed. */ \ flush_xen_text_tlb() +static inline lpae_t pte_of_xenaddr(vaddr_t va) +{ + paddr_t ma = va + phys_offset; + unsigned long mfn = ma >> PAGE_SHIFT; + return mfn_to_xen_entry(mfn); +} + /* Boot-time pagetable setup. * Changes here may need matching changes in head.S */ void __init setup_pagetables(unsigned long boot_phys_offset, paddr_t xen_paddr) @@ -387,8 +394,7 @@ void __init setup_pagetables(unsigned long boot_phys_offset, paddr_t xen_paddr) flush_xen_text_tlb(); /* Link in the fixmap pagetable */ - pte = mfn_to_xen_entry((((unsigned long) xen_fixmap) + phys_offset) - >> PAGE_SHIFT); + pte = pte_of_xenaddr((vaddr_t)xen_fixmap); pte.pt.table = 1; write_pte(xen_second + second_table_offset(FIXMAP_ADDR(0)), pte); /* @@ -415,8 +421,7 @@ void __init setup_pagetables(unsigned long boot_phys_offset, paddr_t xen_paddr) write_pte(xen_xenmap + i, pte); /* No flush required here as page table is not hooked in yet. */ } - pte = mfn_to_xen_entry((((unsigned long) xen_xenmap) + phys_offset) - >> PAGE_SHIFT); + pte = pte_of_xenaddr((vaddr_t)xen_xenmap); pte.pt.table = 1; write_pte(xen_second + second_linear_offset(XEN_VIRT_START), pte); /* TLBFLUSH and ISB would be needed here, but wait until we set WXN */ -- 1.7.2.5
The current implementation is nonsense since the xenheap and the xen text/data/etc mappings are nowhere near each other. This is only actually used by the page offlining code, which isn't active on ARM. Signed-off-by: Ian Campbell <ian.campbell@citrix.com> Acked-by: Stefano Stabellini <stefano.stabellini@eu.citrix.com> --- xen/include/asm-arm/mm.h | 4 +++- 1 files changed, 3 insertions(+), 1 deletions(-) diff --git a/xen/include/asm-arm/mm.h b/xen/include/asm-arm/mm.h index 5e7c5a3..baaf9c3 100644 --- a/xen/include/asm-arm/mm.h +++ b/xen/include/asm-arm/mm.h @@ -123,7 +123,9 @@ extern unsigned long xenheap_virt_end; unsigned long _mfn = (mfn); \ (_mfn >= xenheap_mfn_start && _mfn < xenheap_mfn_end); \ }) -#define is_xen_fixed_mfn(mfn) is_xen_heap_mfn(mfn) +#define is_xen_fixed_mfn(mfn) \ + ((((mfn) << PAGE_SHIFT) >= virt_to_maddr(&_start)) && \ + (((mfn) << PAGE_SHIFT) <= virt_to_maddr(&_end))) #define page_get_owner(_p) (_p)->v.inuse.domain #define page_set_owner(_p,_d) ((_p)->v.inuse.domain = (_d)) -- 1.7.2.5
Ian Campbell
2013-Aug-08 12:15 UTC
[PATCH v3 08/10] xen: arm: allow virt_to_maddr to take either a pointer or an integer
This seems to be expected by common code which passes both pointers and unsigned long as virtual addresses. The latter case in particular is in init_node_heap() under a DIRECTMAP_VIRT_END #ifdef, which is why it hasn't affected us yet (but will in a subsequent patch). The new prototypes match the x86 versions apart from using vaddr_t instead of unsigned long. Signed-off-by: Ian Campbell <ian.campbell@citrix.com> Acked-by: Stefano Stabellini <stefano.stabellini@eu.citrix.com> --- xen/include/asm-arm/mm.h | 7 ++++--- 1 files changed, 4 insertions(+), 3 deletions(-) diff --git a/xen/include/asm-arm/mm.h b/xen/include/asm-arm/mm.h index baaf9c3..7aca836 100644 --- a/xen/include/asm-arm/mm.h +++ b/xen/include/asm-arm/mm.h @@ -208,11 +208,12 @@ #define paddr_to_pdx(pa) pfn_to_pdx(paddr_to_pfn(pa)) -static inline paddr_t virt_to_maddr(const void *va) +static inline paddr_t __virt_to_maddr(vaddr_t va) { - uint64_t par = va_to_par((vaddr_t)va); - return (par & PADDR_MASK & PAGE_MASK) | ((vaddr_t) va & ~PAGE_MASK); + uint64_t par = va_to_par(va); + return (par & PADDR_MASK & PAGE_MASK) | (va & ~PAGE_MASK); } +#define virt_to_maddr(va) __virt_to_maddr((vaddr_t)(va)) static inline void *maddr_to_virt(paddr_t ma) { -- 1.7.2.5
Ian Campbell
2013-Aug-08 12:15 UTC
[PATCH v3 09/10] xen: gate split heap code on its own config option rather than !X86
I'm going to want to disable this for 64 bit ARM. Signed-off-by: Ian Campbell <ian.campbell@citrix.com> Acked-by: Jan Beulich <jbeulich@suse.com> Acked-by: Stefano Stabellini <stefano.stabellini@eu.citrix.com> Acked-by: Keir Fraser <keir@xen.org> --- xen/common/page_alloc.c | 2 +- xen/include/asm-arm/config.h | 1 + 2 files changed, 2 insertions(+), 1 deletions(-) diff --git a/xen/common/page_alloc.c b/xen/common/page_alloc.c index 25a7d3d..41251b2 100644 --- a/xen/common/page_alloc.c +++ b/xen/common/page_alloc.c @@ -1305,7 +1305,7 @@ void __init scrub_heap_pages(void) * XEN-HEAP SUB-ALLOCATOR */ -#if !defined(CONFIG_X86) +#if defined(CONFIG_SEPARATE_XENHEAP) void init_xenheap_pages(paddr_t ps, paddr_t pe) { diff --git a/xen/include/asm-arm/config.h b/xen/include/asm-arm/config.h index 7e5daa0..fb9e93c 100644 --- a/xen/include/asm-arm/config.h +++ b/xen/include/asm-arm/config.h @@ -36,6 +36,7 @@ #define CONFIG_SMP 1 #define CONFIG_DOMAIN_PAGE 1 +#define CONFIG_SEPARATE_XENHEAP 1 #define CONFIG_VIDEO 1 -- 1.7.2.5
Ian Campbell
2013-Aug-08 12:15 UTC
[PATCH v3 10/10] xen: arm: Use a direct mapping of RAM on arm64
We have plenty of virtual address space so we can avoid needing to map and unmap pages all the time. A totally arbitrarily chosen 32GB frame table leads to support for 5TB of RAM. I haven''t tested with anything near that amount of RAM though. There is plenty of room to expand further when that becomes necessary. Signed-off-by: Ian Campbell <ian.campbell@citrix.com> Acked-by: Stefano Stabellini <stefano.stabellini@eu.citrix.com> --- v2: Remove obsolete comments hung over from the 32-bit version Reformat TODO item about pte.pt.contig Duplicate prototype for alternate functions completely inside appropriate ifdef instead of just ifdeffing the body. --- xen/arch/arm/mm.c | 157 ++++++++++++++++++++++++++++++++++-------- xen/arch/arm/setup.c | 77 ++++++++++++++++++++ xen/include/asm-arm/config.h | 96 +++++++++++++++++++------- xen/include/asm-arm/mm.h | 16 ++++ 4 files changed, 293 insertions(+), 53 deletions(-) diff --git a/xen/arch/arm/mm.c b/xen/arch/arm/mm.c index 974d6df..69c157a 100644 --- a/xen/arch/arm/mm.c +++ b/xen/arch/arm/mm.c @@ -37,6 +37,7 @@ #include <public/memory.h> #include <xen/sched.h> #include <xen/vmap.h> +#include <asm/early_printk.h> #include <xsm/xsm.h> #include <xen/pfn.h> @@ -48,6 +49,14 @@ struct domain *dom_xen, *dom_io, *dom_cow; lpae_t boot_pgtable[LPAE_ENTRIES] __attribute__((__aligned__(4096))); #ifdef CONFIG_ARM_64 lpae_t boot_first[LPAE_ENTRIES] __attribute__((__aligned__(4096))); +/* The first page of the first level mapping of the xenheap. The + * subsequent xenheap first level pages are dynamically allocated, but + * we need this one to bootstrap ourselves. */ +lpae_t xenheap_first_first[LPAE_ENTRIES] __attribute__((__aligned__(4096))); +/* The zeroeth level slot which uses xenheap_first_first. Used because + * setup_xenheap_mappings otherwise relies on mfn_to_virt which isn''t + * valid for a non-xenheap mapping. 
*/ +static __initdata int xenheap_first_first_slot = -1; #endif /* @@ -57,6 +66,9 @@ lpae_t boot_first[LPAE_ENTRIES] __attribute__((__aligned__(4096))); * xen_second, xen_fixmap and xen_xenmap are shared between all PCPUs. */ +#ifdef CONFIG_ARM_64 +#define THIS_CPU_PGTABLE boot_pgtable +#else /* Per-CPU pagetable pages */ /* xen_pgtable == root of the trie (zeroeth level on 64-bit, first on 32-bit) */ static DEFINE_PER_CPU(lpae_t *, xen_pgtable); @@ -65,6 +77,7 @@ static DEFINE_PER_CPU(lpae_t *, xen_pgtable); * the second level pagetables which mapp the domheap region * DOMHEAP_VIRT_START...DOMHEAP_VIRT_END in 2MB chunks. */ static DEFINE_PER_CPU(lpae_t *, xen_dommap); +#endif /* Common pagetable leaves */ /* Second level page tables. @@ -73,10 +86,16 @@ static DEFINE_PER_CPU(lpae_t *, xen_dommap); * addresses from 0 to 0x7fffffff. Offsets into it are calculated * with second_linear_offset(), not second_table_offset(). * - * Addresses 0x80000000 to 0xffffffff are covered by the per-cpu - * xen_domheap mappings described above. However we allocate 4 pages + * On 32bit addresses 0x80000000 to 0xffffffff are covered by the + * per-cpu xen_domheap mappings described above. We allocate 4 pages * here for use in the boot page tables and the second two pages * become the boot CPUs xen_dommap pages. + * + * On 64bit addresses 0x80000000 to 0xffffffff are unused. However we + * allocate 4 pages here for use while relocating Xen, which currently + * expects a second level page to exist for all addresses in the first + * 4GB. We need to keep these extra mappings in place for seconary CPU + * bring up too. For now we just leave them forever. 
*/ lpae_t xen_second[LPAE_ENTRIES*4] __attribute__((__aligned__(4096*4))); /* First level page table used for fixmap */ @@ -92,7 +111,7 @@ uint64_t boot_ttbr; static paddr_t phys_offset; /* Limits of the Xen heap */ -unsigned long xenheap_mfn_start __read_mostly; +unsigned long xenheap_mfn_start __read_mostly = ~0UL; unsigned long xenheap_mfn_end __read_mostly; unsigned long xenheap_virt_end __read_mostly; @@ -112,7 +131,9 @@ static inline void check_memory_layout_alignment_constraints(void) { BUILD_BUG_ON(BOOT_MISC_VIRT_START & ~SECOND_MASK); /* 1GB aligned regions */ BUILD_BUG_ON(XENHEAP_VIRT_START & ~FIRST_MASK); +#ifdef CONFIG_DOMAIN_PAGE BUILD_BUG_ON(DOMHEAP_VIRT_START & ~FIRST_MASK); +#endif } void dump_pt_walk(lpae_t *first, paddr_t addr) @@ -180,6 +201,7 @@ void clear_fixmap(unsigned map) flush_xen_data_tlb_range_va(FIXMAP_ADDR(map), PAGE_SIZE); } +#ifdef CONFIG_DOMAIN_PAGE void *map_domain_page_global(unsigned long mfn) { return vmap(&mfn, 1); @@ -284,6 +306,7 @@ unsigned long domain_page_map_to_mfn(const void *va) return map[slot].pt.base + offset; } +#endif void __init arch_init_memory(void) { @@ -431,6 +454,7 @@ void __init setup_pagetables(unsigned long boot_phys_offset, paddr_t xen_paddr) /* Flush everything after setting WXN bit. 
*/ flush_xen_text_tlb(); +#ifdef CONFIG_ARM_32 per_cpu(xen_pgtable, 0) = boot_pgtable; per_cpu(xen_dommap, 0) = xen_second + second_linear_offset(DOMHEAP_VIRT_START); @@ -440,43 +464,33 @@ void __init setup_pagetables(unsigned long boot_phys_offset, paddr_t xen_paddr) memset(this_cpu(xen_dommap), 0, DOMHEAP_SECOND_PAGES*PAGE_SIZE); flush_xen_dcache_va_range(this_cpu(xen_dommap), DOMHEAP_SECOND_PAGES*PAGE_SIZE); +#endif } - +#ifdef CONFIG_ARM_64 +int init_secondary_pagetables(int cpu) +{ + /* All CPUs share a single page table on 64 bit */ + return 0; +} +#else int init_secondary_pagetables(int cpu) { - lpae_t *root, *first, *domheap, pte; + lpae_t *first, *domheap, pte; int i; - root = alloc_xenheap_page(); -#ifdef CONFIG_ARM_64 - first = alloc_xenheap_page(); -#else - first = root; /* root == first level on 32-bit 3-level trie */ -#endif + first = alloc_xenheap_page(); /* root == first level on 32-bit 3-level trie */ domheap = alloc_xenheap_pages(get_order_from_pages(DOMHEAP_SECOND_PAGES), 0); - if ( root == NULL || domheap == NULL || first == NULL ) + if ( domheap == NULL || first == NULL ) { printk("Not enough free memory for secondary CPU%d pagetables\n", cpu); free_xenheap_pages(domheap, get_order_from_pages(DOMHEAP_SECOND_PAGES)); -#ifdef CONFIG_ARM_64 free_xenheap_page(first); -#endif - free_xenheap_page(root); return -ENOMEM; } /* Initialise root pagetable from root of boot tables */ - memcpy(root, boot_pgtable, PAGE_SIZE); - -#ifdef CONFIG_ARM_64 - /* Initialise first pagetable from first level of boot tables, and - * hook into the new root. 
*/ - memcpy(first, boot_first, PAGE_SIZE); - pte = mfn_to_xen_entry(virt_to_mfn(first)); - pte.pt.table = 1; - write_pte(root, pte); -#endif + memcpy(first, boot_pgtable, PAGE_SIZE); /* Ensure the domheap has no stray mappings */ memset(domheap, 0, DOMHEAP_SECOND_PAGES*PAGE_SIZE); @@ -490,17 +504,15 @@ int init_secondary_pagetables(int cpu) write_pte(&first[first_table_offset(DOMHEAP_VIRT_START+i*FIRST_SIZE)], pte); } - flush_xen_dcache_va_range(root, PAGE_SIZE); -#ifdef CONFIG_ARM_64 flush_xen_dcache_va_range(first, PAGE_SIZE); -#endif flush_xen_dcache_va_range(domheap, DOMHEAP_SECOND_PAGES*PAGE_SIZE); - per_cpu(xen_pgtable, cpu) = root; + per_cpu(xen_pgtable, cpu) = first; per_cpu(xen_dommap, cpu) = domheap; return 0; } +#endif /* MMU setup for secondary CPUS (which already have paging enabled) */ void __cpuinit mmu_init_secondary_cpu(void) @@ -544,6 +556,7 @@ static void __init create_32mb_mappings(lpae_t *second, flush_xen_data_tlb(); } +#ifdef CONFIG_ARM_32 /* Set up the xenheap: up to 1GB of contiguous, always-mapped memory. */ void __init setup_xenheap_mappings(unsigned long base_mfn, unsigned long nr_mfns) @@ -555,6 +568,73 @@ void __init setup_xenheap_mappings(unsigned long base_mfn, xenheap_mfn_start = base_mfn; xenheap_mfn_end = base_mfn + nr_mfns; } +#else /* CONFIG_ARM_64 */ +void __init setup_xenheap_mappings(unsigned long base_mfn, + unsigned long nr_mfns) +{ + lpae_t *first, pte; + unsigned long offset, end_mfn; + vaddr_t vaddr; + + /* First call sets the xenheap physical offset. 
*/ + if ( xenheap_mfn_start == ~0UL ) + xenheap_mfn_start = base_mfn; + + if ( base_mfn < xenheap_mfn_start ) + early_panic("cannot add xenheap mapping at %lx below heap start %lx\n", + base_mfn, xenheap_mfn_start); + + end_mfn = base_mfn + nr_mfns; + + /* Align to previous 1GB boundary */ + base_mfn &= ~FIRST_MASK; + + offset = base_mfn - xenheap_mfn_start; + vaddr = DIRECTMAP_VIRT_START + offset*PAGE_SIZE; + + while ( base_mfn < end_mfn ) + { + int slot = zeroeth_table_offset(vaddr); + lpae_t *p = &boot_pgtable[slot]; + + if ( p->pt.valid ) + { + /* mfn_to_virt is not valid on the 1st 1st mfn, since it + * is not within the xenheap. */ + first = slot == xenheap_first_first_slot ? + xenheap_first_first : mfn_to_virt(p->pt.base); + } + else if ( xenheap_first_first_slot == -1) + { + /* Use xenheap_first_first to bootstrap the mappings */ + first = xenheap_first_first; + + pte = pte_of_xenaddr((vaddr_t)xenheap_first_first); + pte.pt.table = 1; + write_pte(p, pte); + + xenheap_first_first_slot = slot; + } + else + { + unsigned long first_mfn = alloc_boot_pages(1, 1); + pte = mfn_to_xen_entry(first_mfn); + pte.pt.table = 1; + write_pte(p, pte); + first = mfn_to_virt(first_mfn); + } + + pte = mfn_to_xen_entry(base_mfn); + /* TODO: Set pte.pt.contig when appropriate. 
*/ + write_pte(&first[first_table_offset(vaddr)], pte); + + base_mfn += FIRST_SIZE>>PAGE_SHIFT; + vaddr += FIRST_SIZE; + } + + flush_xen_data_tlb(); +} +#endif /* Map a frame table to cover physical addresses ps through pe */ void __init setup_frametable_mappings(paddr_t ps, paddr_t pe) @@ -562,13 +642,32 @@ void __init setup_frametable_mappings(paddr_t ps, paddr_t pe) unsigned long nr_pages = (pe - ps) >> PAGE_SHIFT; unsigned long frametable_size = nr_pages * sizeof(struct page_info); unsigned long base_mfn; +#ifdef CONFIG_ARM_64 + lpae_t *second, pte; + unsigned long nr_second, second_base; + int i; +#endif frametable_base_mfn = ps >> PAGE_SHIFT; /* Round up to 32M boundary */ frametable_size = (frametable_size + 0x1ffffff) & ~0x1ffffff; base_mfn = alloc_boot_pages(frametable_size >> PAGE_SHIFT, 32<<(20-12)); + +#ifdef CONFIG_ARM_64 + nr_second = frametable_size >> SECOND_SHIFT; + second_base = alloc_boot_pages(nr_second, 1); + second = mfn_to_virt(second_base); + for ( i = 0; i < nr_second; i++ ) + { + pte = mfn_to_xen_entry(second_base + i); + pte.pt.table = 1; + write_pte(&boot_first[first_table_offset(FRAMETABLE_VIRT_START)+i], pte); + } + create_32mb_mappings(second, 0, base_mfn, frametable_size >> PAGE_SHIFT); +#else create_32mb_mappings(xen_second, FRAMETABLE_VIRT_START, base_mfn, frametable_size >> PAGE_SHIFT); +#endif memset(&frame_table[0], 0, nr_pages * sizeof(struct page_info)); memset(&frame_table[nr_pages], -1, diff --git a/xen/arch/arm/setup.c b/xen/arch/arm/setup.c index 1ec5e38..a25e483 100644 --- a/xen/arch/arm/setup.c +++ b/xen/arch/arm/setup.c @@ -278,6 +278,7 @@ static paddr_t __init get_xen_paddr(void) return paddr; } +#ifdef CONFIG_ARM_32 static void __init setup_mm(unsigned long dtb_paddr, size_t dtb_size) { paddr_t ram_start; @@ -402,6 +403,82 @@ static void __init setup_mm(unsigned long dtb_paddr, size_t dtb_size) end_boot_allocator(); } +#else /* CONFIG_ARM_64 */ +static void __init setup_mm(unsigned long dtb_paddr, size_t dtb_size) +{ 
+ paddr_t ram_start = ~0; + paddr_t ram_end = 0; + int bank; + unsigned long xenheap_pages = 0; + unsigned long dtb_pages; + + total_pages = 0; + for ( bank = 0 ; bank < early_info.mem.nr_banks; bank++ ) + { + paddr_t bank_start = early_info.mem.bank[bank].start; + paddr_t bank_size = early_info.mem.bank[bank].size; + paddr_t bank_end = bank_start + bank_size; + unsigned long bank_pages = bank_size >> PAGE_SHIFT; + paddr_t s, e; + + total_pages += bank_pages; + + if ( bank_start < ram_start ) + ram_start = bank_start; + if ( bank_end > ram_end ) + ram_end = bank_end; + + xenheap_pages += (bank_size >> PAGE_SHIFT); + + /* XXX we assume that the ram regions are ordered */ + s = bank_start; + while ( s < bank_end ) + { + paddr_t n = bank_end; + + e = next_module(s, &n); + + if ( e == ~(paddr_t)0 ) + { + e = n = bank_end; + } + + setup_xenheap_mappings(s>>PAGE_SHIFT, (e-s)>>PAGE_SHIFT); + + xenheap_mfn_end = e; + + init_boot_pages(s, e); + s = n; + } + } + + xenheap_virt_end = XENHEAP_VIRT_START + ram_end - ram_start; + xenheap_mfn_start = ram_start >> PAGE_SHIFT; + xenheap_mfn_end = ram_end >> PAGE_SHIFT; + xenheap_max_mfn(xenheap_mfn_end); + + /* + * Need enough mapped pages for copying the DTB. + * + * TODO: The DTB (and other payloads) are assumed to be towards + * the start of RAM. + */ + dtb_pages = (dtb_size + PAGE_SIZE-1) >> PAGE_SHIFT; + + /* + * Copy the DTB. + * + * TODO: handle other payloads too. 
+ */ + device_tree_flattened = mfn_to_virt(alloc_boot_pages(dtb_pages, 1)); + copy_from_paddr(device_tree_flattened, dtb_paddr, dtb_size, BUFFERABLE); + + setup_frametable_mappings(ram_start, ram_end); + max_page = PFN_DOWN(ram_end); + + end_boot_allocator(); +} +#endif size_t __read_mostly cacheline_bytes; diff --git a/xen/include/asm-arm/config.h b/xen/include/asm-arm/config.h index fb9e93c..259d4c6 100644 --- a/xen/include/asm-arm/config.h +++ b/xen/include/asm-arm/config.h @@ -35,9 +35,6 @@ #define CONFIG_SMP 1 -#define CONFIG_DOMAIN_PAGE 1 -#define CONFIG_SEPARATE_XENHEAP 1 - #define CONFIG_VIDEO 1 #define OPT_CONSOLE_STR "dtuart" @@ -76,38 +73,89 @@ #include <xen/const.h> /* - * Memory layout: - * 0 - 2M Unmapped - * 2M - 4M Xen text, data, bss - * 4M - 6M Fixmap: special-purpose 4K mapping slots - * 6M - 8M Early boot misc (see below) - * - * 32M - 128M Frametable: 24 bytes per page for 16GB of RAM - * 256M - 1G VMAP: ioremap and early_ioremap use this virtual address - * space - * - * 1G - 2G Xenheap: always-mapped memory - * 2G - 4G Domheap: on-demand-mapped + * Common ARM32 and ARM64 layout: + * 0 - 2M Unmapped + * 2M - 4M Xen text, data, bss + * 4M - 6M Fixmap: special-purpose 4K mapping slots + * 6M - 8M Early boot misc (see below) * * The early boot misc area is used: * - in head.S for the DTB for device_tree_early_init(). * - in setup_pagetables() when relocating Xen. 
+ * + * ARM32 layout: + * 0 - 8M <COMMON> + * + * 32M - 128M Frametable: 24 bytes per page for 16GB of RAM + * 256M - 1G VMAP: ioremap and early_ioremap use this virtual address + * space + * + * 1G - 2G Xenheap: always-mapped memory + * 2G - 4G Domheap: on-demand-mapped + * + * ARM64 layout: + * 0x0000000000000000 - 0x0000007fffffffff (512GB, L0 slot [0]) + * 0 - 8M <COMMON> + * + * 1G - 2G VMAP: ioremap and early_ioremap + * + * 32G - 64G Frametable: 24 bytes per page for 5.3TB of RAM + * + * 0x0000008000000000 - 0x00007fffffffffff (127.5TB, L0 slots [1..255]) + * Unused + * + * 0x0000800000000000 - 0x000084ffffffffff (5TB, L0 slots [256..265]) + * 1:1 mapping of RAM + * + * 0x0000850000000000 - 0x0000ffffffffffff (123TB, L0 slots [266..511]) + * Unused */ -#define XEN_VIRT_START _AC(0x00200000,UL) -#define FIXMAP_ADDR(n) (_AC(0x00400000,UL) + (n) * PAGE_SIZE) -#define BOOT_MISC_VIRT_START _AC(0x00600000,UL) -#define FRAMETABLE_VIRT_START _AC(0x02000000,UL) -#define VMAP_VIRT_START _AC(0x10000000,UL) -#define XENHEAP_VIRT_START _AC(0x40000000,UL) -#define DOMHEAP_VIRT_START _AC(0x80000000,UL) -#define DOMHEAP_VIRT_END _AC(0xffffffff,UL) +#define XEN_VIRT_START _AT(vaddr_t,0x00200000) +#define FIXMAP_ADDR(n) (_AT(vaddr_t,0x00400000) + (n) * PAGE_SIZE) +#define BOOT_MISC_VIRT_START _AT(vaddr_t,0x00600000) -#define VMAP_VIRT_END XENHEAP_VIRT_START #define HYPERVISOR_VIRT_START XEN_VIRT_START +#ifdef CONFIG_ARM_32 + +#define CONFIG_DOMAIN_PAGE 1 +#define CONFIG_SEPARATE_XENHEAP 1 + +#define FRAMETABLE_VIRT_START _AT(vaddr_t,0x02000000) +#define VMAP_VIRT_START _AT(vaddr_t,0x10000000) +#define XENHEAP_VIRT_START _AT(vaddr_t,0x40000000) +#define XENHEAP_VIRT_END _AT(vaddr_t,0x7fffffff) +#define DOMHEAP_VIRT_START _AT(vaddr_t,0x80000000) +#define DOMHEAP_VIRT_END _AT(vaddr_t,0xffffffff) + +#define VMAP_VIRT_END XENHEAP_VIRT_START + #define DOMHEAP_ENTRIES 1024 /* 1024 2MB mapping slots */ +#else /* ARM_64 */ + +#define SLOT0_ENTRY_BITS 39 +#define SLOT0(slot) 
(_AT(vaddr_t,slot) << SLOT0_ENTRY_BITS) +#define SLOT0_ENTRY_SIZE SLOT0(1) +#define GB(_gb) (_AC(_gb, UL) << 30) + +#define VMAP_VIRT_START GB(1) +#define VMAP_VIRT_END (VMAP_VIRT_START + GB(1) - 1) + +#define FRAMETABLE_VIRT_START GB(32) +#define FRAMETABLE_VIRT_END (FRAMETABLE_VIRT_START + GB(32) - 1) + +#define DIRECTMAP_VIRT_START SLOT0(256) +#define DIRECTMAP_SIZE (SLOT0_ENTRY_SIZE * (265-256)) +#define DIRECTMAP_VIRT_END (DIRECTMAP_VIRT_START + DIRECTMAP_SIZE - 1) + +#define XENHEAP_VIRT_START DIRECTMAP_VIRT_START + +#define HYPERVISOR_VIRT_END DIRECTMAP_VIRT_END + +#endif + /* Number of domheap pagetable pages required at the second level (2MB mappings) */ #define DOMHEAP_SECOND_PAGES ((DOMHEAP_VIRT_END - DOMHEAP_VIRT_START + 1) >> FIRST_SHIFT) diff --git a/xen/include/asm-arm/mm.h b/xen/include/asm-arm/mm.h index 7aca836..27284d0 100644 --- a/xen/include/asm-arm/mm.h +++ b/xen/include/asm-arm/mm.h @@ -118,11 +118,18 @@ struct page_info extern unsigned long xenheap_mfn_start, xenheap_mfn_end; extern unsigned long xenheap_virt_end; +#ifdef CONFIG_ARM_32 #define is_xen_heap_page(page) is_xen_heap_mfn(page_to_mfn(page)) #define is_xen_heap_mfn(mfn) ({ \ unsigned long _mfn = (mfn); \ (_mfn >= xenheap_mfn_start && _mfn < xenheap_mfn_end); \ }) +#else +#define is_xen_heap_page(page) ((page)->count_info & PGC_xen_heap) +#define is_xen_heap_mfn(mfn) \ + (mfn_valid(mfn) && is_xen_heap_page(__mfn_to_page(mfn))) +#endif + #define is_xen_fixed_mfn(mfn) \ ((((mfn) << PAGE_SHIFT) >= virt_to_maddr(&_start)) && \ (((mfn) << PAGE_SHIFT) <= virt_to_maddr(&_end))) @@ -215,12 +222,21 @@ static inline paddr_t __virt_to_maddr(vaddr_t va) } #define virt_to_maddr(va) __virt_to_maddr((vaddr_t)(va)) +#ifdef CONFIG_ARM_32 static inline void *maddr_to_virt(paddr_t ma) { ASSERT(is_xen_heap_mfn(ma >> PAGE_SHIFT)); ma -= pfn_to_paddr(xenheap_mfn_start); return (void *)(unsigned long) ma + XENHEAP_VIRT_START; } +#else +static inline void *maddr_to_virt(paddr_t ma) +{ + ASSERT((ma >> 
PAGE_SHIFT) < (DIRECTMAP_SIZE >> PAGE_SHIFT)); + ma -= pfn_to_paddr(xenheap_mfn_start); + return (void *)(unsigned long) ma + DIRECTMAP_VIRT_START; +} +#endif static inline int gvirt_to_maddr(vaddr_t va, paddr_t *pa) { -- 1.7.2.5
Tim Deegan
2013-Aug-08 14:01 UTC
Re: [PATCH v3 02/10] xen: arm: Add zeroeth level page table macros and defines
At 13:15 +0100 on 08 Aug (1375967709), Ian Campbell wrote:
> Signed-off-by: Ian Campbell <ian.campbell@citrix.com>

Acked-by: Tim Deegan <tim@xen.org>

> ---
> v3: Don't ifdef rely on >>39 being invalid for 32-bit words
>     Update comment.
> ---
>  xen/include/asm-arm/page.h |   19 +++++++++++++++----
>  1 files changed, 15 insertions(+), 4 deletions(-)
>
> diff --git a/xen/include/asm-arm/page.h b/xen/include/asm-arm/page.h
> index 41e9eff..93bb8c0 100644
> --- a/xen/include/asm-arm/page.h
> +++ b/xen/include/asm-arm/page.h
> @@ -309,9 +309,15 @@ static inline int gva_to_ipa(vaddr_t va, paddr_t *paddr)
>
>  #endif /* __ASSEMBLY__ */
>
> -/* These numbers add up to a 39-bit input address space. The ARMv7-A
> - * architecture actually specifies a 40-bit input address space for the p2m,
> - * with an 8K (1024-entry) top-level table. */
> +/*
> + * These numbers add up to a 48-bit input address space.
> + *
> + * On 32-bit the zeroeth level does not exist, therefore the total is
> + * 39-bits. The ARMv7-A architecture actually specifies a 40-bit input
> + * address space for the p2m, with an 8K (1024-entry) top-level table.
> + * However Xen only supports 16GB of RAM on 32-bit ARM systems and
> + * therefore 39-bits are sufficient.
> + */
>
>  #define LPAE_SHIFT      9
>  #define LPAE_ENTRIES    (1u << LPAE_SHIFT)
> @@ -326,8 +332,12 @@ static inline int gva_to_ipa(vaddr_t va, paddr_t *paddr)
>  #define FIRST_SHIFT   (SECOND_SHIFT + LPAE_SHIFT)
>  #define FIRST_SIZE    (1u << FIRST_SHIFT)
>  #define FIRST_MASK    (~(FIRST_SIZE - 1))
> +#define ZEROETH_SHIFT (FIRST_SHIFT + LPAE_SHIFT)
> +#define ZEROETH_SIZE  (1u << ZEROETH_SHIFT)
> +#define ZEROETH_MASK  (~(ZEROETH_SIZE - 1))
>
>  /* Calculate the offsets into the pagetables for a given VA */
> +#define zeroeth_linear_offset(va) ((va) >> ZEROETH_SHIFT)
>  #define first_linear_offset(va) ((va) >> FIRST_SHIFT)
>  #define second_linear_offset(va) ((va) >> SECOND_SHIFT)
>  #define third_linear_offset(va) ((va) >> THIRD_SHIFT)
> @@ -336,8 +346,9 @@ static inline int gva_to_ipa(vaddr_t va, paddr_t *paddr)
>  #define first_table_offset(va)  TABLE_OFFSET(first_linear_offset(va))
>  #define second_table_offset(va) TABLE_OFFSET(second_linear_offset(va))
>  #define third_table_offset(va)  TABLE_OFFSET(third_linear_offset(va))
> +#define zeroeth_table_offset(va) TABLE_OFFSET(zeroeth_linear_offset(va))
>
> -#define clear_page(page) memset((void *)(page), 0, PAGE_SIZE)
> +#define clear_page(page) memset((void *)(page), 0, PAGE_SIZE)
>
>  #define PAGE_ALIGN(x) (((x) + PAGE_SIZE - 1) & PAGE_MASK)
>
> --
> 1.7.2.5
>
>
> _______________________________________________
> Xen-devel mailing list
> Xen-devel@lists.xen.org
> http://lists.xen.org/xen-devel
Ian Campbell
2013-Aug-20 15:02 UTC
Re: [PATCH v3 00/10] xen: arm: direct 1:1 map on 64 bit
On Thu, 2013-08-08 at 13:14 +0100, Ian Campbell wrote:
> Hopefully the last iteration of this series. The only change this time
> round is to the patch adding the zeroeth level pt accessors, to remove
> the ifdef and to update the comment. Everything else has been acked
> (thanks all).

Tim acked the remaining patch (thanks) -> applied.