Jan Beulich
2012-Sep-11 11:37 UTC
[PATCH 2/2] x86-64: construct static, uniform parts of page tables at build time
... rather than at boot time, removing unnecessary redundancy between EFI
and legacy boot code.

Signed-off-by: Jan Beulich <jbeulich@suse.com>

--- a/xen/arch/x86/boot/head.S
+++ b/xen/arch/x86/boot/head.S
@@ -123,46 +123,19 @@ __start:
         /* Check for availability of long mode. */
         bt      $29,%edx
         jnc     bad_cpu
-        /* Initialise L2 identity-map and xen page table entries (16MB). */
-        mov     $sym_phys(l2_xenmap),%esi
+        /* Initialise L2 boot-map page table entries (16MB). */
         mov     $sym_phys(l2_bootmap),%edx
-        mov     $0x1e3,%eax                  /* PRESENT+RW+A+D+2MB+GLOBAL */
+        mov     $PAGE_HYPERVISOR|_PAGE_PSE,%eax
         mov     $8,%ecx
-1:      mov     %eax,(%esi)
-        add     $8,%esi
-        mov     %eax,(%edx)
+1:      mov     %eax,(%edx)
         add     $8,%edx
         add     $(1<<L2_PAGETABLE_SHIFT),%eax
         loop    1b
-        /* Initialise L2 fixmap page directory entry. */
-        mov     $(sym_phys(l1_fixmap)+7),%eax
-        mov     %eax,sym_phys(l2_fixmap) + l2_table_offset(FIXADDR_TOP-1)*8
-        /* Initialise L3 identity-map page directory entries. */
-        mov     $sym_phys(l3_identmap),%edi
-        mov     $(sym_phys(l2_identmap)+7),%eax
-        mov     $4,%ecx
-1:      mov     %eax,(%edi)
-        add     $8,%edi
-        add     $PAGE_SIZE,%eax
-        loop    1b
-        /* Initialise L3 xen-map and fixmap page directory entries. */
-        mov     $(sym_phys(l2_xenmap)+7),%eax
-        mov     %eax,sym_phys(l3_xenmap) + l3_table_offset(XEN_VIRT_START)*8
-        mov     $(sym_phys(l2_fixmap)+7),%eax
-        mov     %eax,sym_phys(l3_xenmap) + l3_table_offset(FIXADDR_TOP-1)*8
         /* Initialise L3 boot-map page directory entry. */
-        mov     $(sym_phys(l2_bootmap)+7),%eax
+        mov     $sym_phys(l2_bootmap)+__PAGE_HYPERVISOR,%eax
         mov     %eax,sym_phys(l3_bootmap) + 0*8
-        /* Hook identity-map, xen-map, and boot-map L3 tables into PML4. */
-        mov     $(sym_phys(l3_bootmap)+7),%eax
-        mov     %eax,sym_phys(idle_pg_table) + 0*8
-        mov     $(sym_phys(l3_identmap)+7),%eax
-        mov     %eax,sym_phys(idle_pg_table) + l4_table_offset(DIRECTMAP_VIRT_START)*8
-        mov     $(sym_phys(l3_xenmap)+7),%eax
-        mov     %eax,sym_phys(idle_pg_table) + l4_table_offset(XEN_VIRT_START)*8
         /* Hook 4kB mappings of first 2MB of memory into L2. */
         mov     $sym_phys(l1_identmap)+__PAGE_HYPERVISOR,%edi
-        mov     %edi,sym_phys(l2_identmap)
         mov     %edi,sym_phys(l2_xenmap)
         mov     %edi,sym_phys(l2_bootmap)
 #endif
--- a/xen/arch/x86/boot/x86_64.S
+++ b/xen/arch/x86/boot/x86_64.S
@@ -128,10 +128,13 @@ ENTRY(boot_cpu_compat_gdt_table)
         .quad 0x0000910000000000     /* per-CPU entry (limit == cpu) */

         .align PAGE_SIZE, 0
+        .globl __page_tables_start, __page_tables_end
+__page_tables_start:
+
 /* Mapping of first 16 megabytes of memory. */
         .globl l2_identmap
 l2_identmap:
-        .quad 0
+        .quad sym_phys(l1_identmap) + __PAGE_HYPERVISOR
         pfn = 0
         .rept 7
         pfn = pfn + (1 << PAGETABLE_ORDER)
@@ -139,3 +142,68 @@ l2_identmap:
         .endr
         .fill 4 * L2_PAGETABLE_ENTRIES - 8, 8, 0
         .size l2_identmap, . - l2_identmap
+
+        .globl l2_xenmap
+l2_xenmap:
+        idx = 0
+        .rept 8
+        .quad sym_phys(__image_base__) + (idx << L2_PAGETABLE_SHIFT) + (PAGE_HYPERVISOR | _PAGE_PSE)
+        idx = idx + 1
+        .endr
+        .fill L2_PAGETABLE_ENTRIES - 8, 8, 0
+        .size l2_xenmap, . - l2_xenmap
+
+l2_fixmap:
+        idx = 0
+        .rept L2_PAGETABLE_ENTRIES
+        .if idx == l2_table_offset(FIXADDR_TOP - 1)
+        .quad sym_phys(l1_fixmap) + __PAGE_HYPERVISOR
+        .else
+        .quad 0
+        .endif
+        idx = idx + 1
+        .endr
+        .size l2_fixmap, . - l2_fixmap
+
+        .globl l3_identmap
+l3_identmap:
+        idx = 0
+        .rept 4
+        .quad sym_phys(l2_identmap) + (idx << PAGE_SHIFT) + __PAGE_HYPERVISOR
+        idx = idx + 1
+        .endr
+        .fill L3_PAGETABLE_ENTRIES - 4, 8, 0
+        .size l3_identmap, . - l3_identmap
+
+l3_xenmap:
+        idx = 0
+        .rept L3_PAGETABLE_ENTRIES
+        .if idx == l3_table_offset(XEN_VIRT_START)
+        .quad sym_phys(l2_xenmap) + __PAGE_HYPERVISOR
+        .elseif idx == l3_table_offset(FIXADDR_TOP - 1)
+        .quad sym_phys(l2_fixmap) + __PAGE_HYPERVISOR
+        .else
+        .quad 0
+        .endif
+        idx = idx + 1
+        .endr
+        .size l3_xenmap, . - l3_xenmap
+
+/* Top-level master (and idle-domain) page directory. */
+        .globl idle_pg_table
+idle_pg_table:
+        .quad sym_phys(l3_bootmap) + __PAGE_HYPERVISOR
+        idx = 1
+        .rept L4_PAGETABLE_ENTRIES - 1
+        .if idx == l4_table_offset(DIRECTMAP_VIRT_START)
+        .quad sym_phys(l3_identmap) + __PAGE_HYPERVISOR
+        .elseif idx == l4_table_offset(XEN_VIRT_START)
+        .quad sym_phys(l3_xenmap) + __PAGE_HYPERVISOR
+        .else
+        .quad 0
+        .endif
+        idx = idx + 1
+        .endr
+        .size idle_pg_table, . - idle_pg_table
+
+__page_tables_end:
--- a/xen/arch/x86/efi/boot.c
+++ b/xen/arch/x86/efi/boot.c
@@ -573,6 +573,10 @@ static int __init set_color(u32 mask, in
     return max(*pos + *sz, bpp);
 }

+extern const intpte_t __page_tables_start[], __page_tables_end[];
+#define in_page_tables(v) ((intpte_t *)(v) >= __page_tables_start && \
+                           (intpte_t *)(v) < __page_tables_end)
+
 #define PE_BASE_RELOC_ABS      0
 #define PE_BASE_RELOC_HIGHLOW  3
 #define PE_BASE_RELOC_DIR64   10
@@ -604,11 +608,19 @@ static void __init relocate_image(unsign
                 break;
             case PE_BASE_RELOC_HIGHLOW:
                 if ( delta )
+                {
                     *(u32 *)addr += delta;
+                    if ( in_page_tables(addr) )
+                        *(u32 *)addr += xen_phys_start;
+                }
                 break;
             case PE_BASE_RELOC_DIR64:
                 if ( delta )
+                {
                     *(u64 *)addr += delta;
+                    if ( in_page_tables(addr) )
+                        *(intpte_t *)addr += xen_phys_start;
+                }
                 break;
             default:
                 blexit(L"Unsupported relocation type\r\n");
@@ -1113,43 +1125,21 @@ efi_start(EFI_HANDLE ImageHandle, EFI_SY
     *(u16 *)(*trampoline_ptr + (long)trampoline_ptr) =
         trampoline_phys >> 4;

-    /* Initialise L2 identity-map and xen page table entries (16MB). */
+    /* Initialise L2 identity-map and boot-map page table entries (16MB). */
     for ( i = 0; i < 8; ++i )
     {
         unsigned int slot = (xen_phys_start >> L2_PAGETABLE_SHIFT) + i;
         paddr_t addr = slot << L2_PAGETABLE_SHIFT;

         l2_identmap[slot] = l2e_from_paddr(addr, PAGE_HYPERVISOR|_PAGE_PSE);
-        l2_xenmap[i] = l2e_from_paddr(addr, PAGE_HYPERVISOR|_PAGE_PSE);
         slot &= L2_PAGETABLE_ENTRIES - 1;
         l2_bootmap[slot] = l2e_from_paddr(addr, __PAGE_HYPERVISOR|_PAGE_PSE);
     }
-    /* Initialise L2 fixmap page directory entry. */
-    l2_fixmap[l2_table_offset(FIXADDR_TOP - 1)] =
-        l2e_from_paddr((UINTN)l1_fixmap, __PAGE_HYPERVISOR);
-    /* Initialise L3 identity-map page directory entries. */
-    for ( i = 0; i < ARRAY_SIZE(l2_identmap) / L2_PAGETABLE_ENTRIES; ++i )
-        l3_identmap[i] = l3e_from_paddr((UINTN)(l2_identmap +
-                                                i * L2_PAGETABLE_ENTRIES),
-                                        __PAGE_HYPERVISOR);
-    /* Initialise L3 xen-map and fixmap page directory entries. */
-    l3_xenmap[l3_table_offset(XEN_VIRT_START)] =
-        l3e_from_paddr((UINTN)l2_xenmap, __PAGE_HYPERVISOR);
-    l3_xenmap[l3_table_offset(FIXADDR_TOP - 1)] =
-        l3e_from_paddr((UINTN)l2_fixmap, __PAGE_HYPERVISOR);
     /* Initialise L3 boot-map page directory entries. */
     l3_bootmap[l3_table_offset(xen_phys_start)] =
         l3e_from_paddr((UINTN)l2_bootmap, __PAGE_HYPERVISOR);
     l3_bootmap[l3_table_offset(xen_phys_start + (8 << L2_PAGETABLE_SHIFT) - 1)] =
         l3e_from_paddr((UINTN)l2_bootmap, __PAGE_HYPERVISOR);
-    /* Hook identity-map, xen-map, and boot-map L3 tables into PML4. */
-    idle_pg_table[0] = l4e_from_paddr((UINTN)l3_bootmap, __PAGE_HYPERVISOR);
-    idle_pg_table[l4_table_offset(DIRECTMAP_VIRT_START)] =
-        l4e_from_paddr((UINTN)l3_identmap, __PAGE_HYPERVISOR);
-    idle_pg_table[l4_table_offset(XEN_VIRT_START)] =
-        l4e_from_paddr((UINTN)l3_xenmap, __PAGE_HYPERVISOR);
-    /* Hook 4kB mappings of first 2MB of memory into L2. */
-    l2_identmap[0] = l2e_from_paddr((UINTN)l1_identmap, __PAGE_HYPERVISOR);

     if ( gop )
     {
--- a/xen/arch/x86/x86_64/mm.c
+++ b/xen/arch/x86/x86_64/mm.c
@@ -49,24 +49,6 @@ unsigned int __read_mostly pfn_pdx_hole_

 unsigned int __read_mostly m2p_compat_vstart = __HYPERVISOR_COMPAT_VIRT_START;

-/* Top-level master (and idle-domain) page directory. */
-l4_pgentry_t __attribute__ ((__section__ (".bss.page_aligned")))
-    idle_pg_table[L4_PAGETABLE_ENTRIES];
-
-/* Enough page directories to map bottom 4GB of the memory map. */
-l3_pgentry_t __attribute__ ((__section__ (".bss.page_aligned")))
-    l3_identmap[L3_PAGETABLE_ENTRIES];
-
-/* Enough page directories to map the Xen text and static data. */
-l3_pgentry_t __attribute__ ((__section__ (".bss.page_aligned")))
-    l3_xenmap[L3_PAGETABLE_ENTRIES];
-l2_pgentry_t __attribute__ ((__section__ (".bss.page_aligned")))
-    l2_xenmap[L2_PAGETABLE_ENTRIES];
-
-/* Enough page directories to map the early fixmap space. */
-l2_pgentry_t __attribute__ ((__section__ (".bss.page_aligned")))
-    l2_fixmap[L2_PAGETABLE_ENTRIES];
-
 /* Enough page directories to map into the bottom 1GB. */
 l3_pgentry_t __attribute__ ((__section__ (".bss.page_aligned")))
     l3_bootmap[L3_PAGETABLE_ENTRIES];
--- a/xen/arch/x86/xen.lds.S
+++ b/xen/arch/x86/xen.lds.S
@@ -42,6 +42,10 @@ PHDRS
 }
 SECTIONS
 {
+#if defined(__x86_64__) && !defined(EFI)
+  . = __XEN_VIRT_START;
+  __image_base__ = .;
+#endif
   . = __XEN_VIRT_START + 0x100000;
   _start = .;
   .text : {
--- a/xen/include/asm-x86/page.h
+++ b/xen/include/asm-x86/page.h
@@ -304,11 +304,8 @@ extern l2_pgentry_t idle_pg_table_l2[
 extern l2_pgentry_t *compat_idle_pg_table_l2;
 extern unsigned int m2p_compat_vstart;
 extern l2_pgentry_t l2_xenmap[L2_PAGETABLE_ENTRIES],
-    l2_fixmap[L2_PAGETABLE_ENTRIES],
     l2_bootmap[L2_PAGETABLE_ENTRIES];
-extern l3_pgentry_t l3_xenmap[L3_PAGETABLE_ENTRIES],
-    l3_identmap[L3_PAGETABLE_ENTRIES],
-    l3_bootmap[L3_PAGETABLE_ENTRIES];
+extern l3_pgentry_t l3_bootmap[L3_PAGETABLE_ENTRIES];
 #endif
 extern l2_pgentry_t l2_identmap[4*L2_PAGETABLE_ENTRIES];
 extern l1_pgentry_t l1_identmap[L1_PAGETABLE_ENTRIES],

_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xen.org
http://lists.xen.org/xen-devel