Ian Campbell
2013-Feb-14 16:47 UTC
[PATCH V2 01/46] xen: arm32: Don't bother with the bootloader provided ARM-Linux machine type
Everything is DTB based and on 64-bit there is no such concept even in Linux. Signed-off-by: Ian Campbell <ian.campbell@citrix.com> --- v2: Update start_secondary too. --- xen/arch/arm/arm32/head.S | 7 +++---- xen/arch/arm/setup.c | 1 - xen/arch/arm/smpboot.c | 1 - 3 files changed, 3 insertions(+), 6 deletions(-) diff --git a/xen/arch/arm/arm32/head.S b/xen/arch/arm/arm32/head.S index 20e9da6..92fc36c 100644 --- a/xen/arch/arm/arm32/head.S +++ b/xen/arch/arm/arm32/head.S @@ -72,7 +72,7 @@ past_zImage: cpsid aif /* Disable all interrupts */ /* Save the bootloader arguments in less-clobberable registers */ - mov r7, r1 /* r7 := ARM-linux machine type */ + /* No need to save r1 == Unused ARM-linux machine type */ mov r8, r2 /* r8 := ATAG base address */ /* Find out where we are */ @@ -334,9 +334,8 @@ launch: add sp, #STACK_SIZE /* (which grows down from the top). */ sub sp, #CPUINFO_sizeof /* Make room for CPU save record */ mov r0, r10 /* Marshal args: - phys_offset */ - mov r1, r7 /* - machine type */ - mov r2, r8 /* - ATAG address */ - movs r3, r12 /* - CPU ID */ + mov r1, r8 /* - ATAG address */ + movs r2, r12 /* - CPU ID */ beq start_xen /* and disappear into the land of C */ b start_secondary /* (to the appropriate entry point) */ diff --git a/xen/arch/arm/setup.c b/xen/arch/arm/setup.c index acb7abb..782d252 100644 --- a/xen/arch/arm/setup.c +++ b/xen/arch/arm/setup.c @@ -329,7 +329,6 @@ void __init setup_cache(void) /* C entry point for boot CPU */ void __init start_xen(unsigned long boot_phys_offset, - unsigned long arm_type, unsigned long atag_paddr, unsigned long cpuid) { diff --git a/xen/arch/arm/smpboot.c b/xen/arch/arm/smpboot.c index c7a586b..da4880c 100644 --- a/xen/arch/arm/smpboot.c +++ b/xen/arch/arm/smpboot.c @@ -132,7 +132,6 @@ make_cpus_ready(unsigned int max_cpus, unsigned long boot_phys_offset) /* Boot the current CPU */ void __cpuinit start_secondary(unsigned long boot_phys_offset, - unsigned long arm_type, unsigned long atag_paddr, unsigned 
long cpuid) { -- 1.7.2.5
Ian Campbell
2013-Feb-14 16:47 UTC
[PATCH V2 02/46] xen: arm: rename atag_paddr argument fdt_paddr
We don''t support ATAGs and this is always actually an FDT address. Signed-off-by: Ian Campbell <ian.campbell@citrix.com> Acked-by: Tim Deegan <tim@xen.org> --- v2: Update start_secondary too, s/ATAG/DTB in head.S comments --- xen/arch/arm/arm32/head.S | 4 ++-- xen/arch/arm/setup.c | 6 +++--- xen/arch/arm/smpboot.c | 2 +- 3 files changed, 6 insertions(+), 6 deletions(-) diff --git a/xen/arch/arm/arm32/head.S b/xen/arch/arm/arm32/head.S index 92fc36c..5ec46c3 100644 --- a/xen/arch/arm/arm32/head.S +++ b/xen/arch/arm/arm32/head.S @@ -73,7 +73,7 @@ past_zImage: /* Save the bootloader arguments in less-clobberable registers */ /* No need to save r1 == Unused ARM-linux machine type */ - mov r8, r2 /* r8 := ATAG base address */ + mov r8, r2 /* r8 := DTB base address */ /* Find out where we are */ ldr r0, =start @@ -334,7 +334,7 @@ launch: add sp, #STACK_SIZE /* (which grows down from the top). */ sub sp, #CPUINFO_sizeof /* Make room for CPU save record */ mov r0, r10 /* Marshal args: - phys_offset */ - mov r1, r8 /* - ATAG address */ + mov r1, r8 /* - DTB address */ movs r2, r12 /* - CPU ID */ beq start_xen /* and disappear into the land of C */ b start_secondary /* (to the appropriate entry point) */ diff --git a/xen/arch/arm/setup.c b/xen/arch/arm/setup.c index 782d252..4e50b2b 100644 --- a/xen/arch/arm/setup.c +++ b/xen/arch/arm/setup.c @@ -329,7 +329,7 @@ void __init setup_cache(void) /* C entry point for boot CPU */ void __init start_xen(unsigned long boot_phys_offset, - unsigned long atag_paddr, + unsigned long fdt_paddr, unsigned long cpuid) { void *fdt; @@ -341,7 +341,7 @@ void __init start_xen(unsigned long boot_phys_offset, smp_clear_cpu_maps(); fdt = (void *)BOOT_MISC_VIRT_START - + (atag_paddr & ((1 << SECOND_SHIFT) - 1)); + + (fdt_paddr & ((1 << SECOND_SHIFT) - 1)); fdt_size = device_tree_early_init(fdt); cpus = smp_get_max_cpus(); @@ -365,7 +365,7 @@ void __init start_xen(unsigned long boot_phys_offset, set_current((struct vcpu *)0xfffff000); /* debug 
sanity */ idle_vcpu[0] = current; - setup_mm(atag_paddr, fdt_size); + setup_mm(fdt_paddr, fdt_size); /* Setup Hyp vector base */ WRITE_CP32((uint32_t) hyp_traps_vector, HVBAR); diff --git a/xen/arch/arm/smpboot.c b/xen/arch/arm/smpboot.c index da4880c..60be1a4 100644 --- a/xen/arch/arm/smpboot.c +++ b/xen/arch/arm/smpboot.c @@ -132,7 +132,7 @@ make_cpus_ready(unsigned int max_cpus, unsigned long boot_phys_offset) /* Boot the current CPU */ void __cpuinit start_secondary(unsigned long boot_phys_offset, - unsigned long atag_paddr, + unsigned long fdt_paddr, unsigned long cpuid) { memset(get_cpu_info(), 0, sizeof (struct cpu_info)); -- 1.7.2.5
Ian Campbell
2013-Feb-14 16:47 UTC
[PATCH V2 03/46] xen: arm: do not pass a machine ID to dom0
Xen relies on DTB and we pass in a suitable device-tree so we don''t need to (and shouldn''t) pretend to be a Versatile Express here. We already don''t pass a machine ID to domU in the same way. Signed-off-by: Ian Campbell <ian.campbell@citrix.com> Acked-by: Tim Deegan <tim@xen.org> --- xen/arch/arm/domain_build.c | 2 +- 1 files changed, 1 insertions(+), 1 deletions(-) diff --git a/xen/arch/arm/domain_build.c b/xen/arch/arm/domain_build.c index 6abbb03..7403f1a 100644 --- a/xen/arch/arm/domain_build.c +++ b/xen/arch/arm/domain_build.c @@ -349,7 +349,7 @@ int construct_dom0(struct domain *d) */ regs->r0 = 0; /* SBZ */ - regs->r1 = 2272; /* Machine NR: Versatile Express */ + regs->r1 = 0xffffffff; /* We use DTB therefore no machine id */ regs->r2 = kinfo.dtb_paddr; WRITE_CP32(SCTLR_BASE, SCTLR); -- 1.7.2.5
Ian Campbell
2013-Feb-14 16:47 UTC
[PATCH V2 04/46] arm: avoid inline asm for dsb, isb, wfi and sev.
"dsb" must be written "dsb sy" on arm64. "dsb sy" is also valid (and synonymous) on arm32 but we have a macro so lets use it. Signed-off-by: Ian Campbell <ian.campbell@citrix.com> --- xen/arch/arm/domain.c | 5 ++++- xen/arch/arm/smpboot.c | 10 ++++++---- 2 files changed, 10 insertions(+), 5 deletions(-) diff --git a/xen/arch/arm/domain.c b/xen/arch/arm/domain.c index e37ec54..e7d3ec6 100644 --- a/xen/arch/arm/domain.c +++ b/xen/arch/arm/domain.c @@ -29,7 +29,10 @@ void idle_loop(void) local_irq_disable(); if ( cpu_is_haltable(smp_processor_id()) ) - asm volatile ("dsb; wfi"); + { + dsb(); + wfi(); + } local_irq_enable(); do_tasklet(); diff --git a/xen/arch/arm/smpboot.c b/xen/arch/arm/smpboot.c index 60be1a4..86379b7 100644 --- a/xen/arch/arm/smpboot.c +++ b/xen/arch/arm/smpboot.c @@ -122,7 +122,8 @@ make_cpus_ready(unsigned int max_cpus, unsigned long boot_phys_offset) /* TODO: handle boards where CPUIDs are not contiguous */ *gate = i; flush_xen_dcache(*gate); - asm volatile("isb; sev"); + isb(); + sev(); /* And wait for it to respond */ while ( ready_cpus < i ) smp_rmb(); @@ -204,8 +205,8 @@ void stop_cpu(void) /* Make sure the write happens before we sleep forever */ dsb(); isb(); - while ( 1 ) - asm volatile("wfi"); + while ( 1 ) + wfi(); } /* Bring up a remote CPU */ @@ -220,7 +221,8 @@ int __cpu_up(unsigned int cpu) /* we need to make sure that the change to smp_up_cpu is visible to * secondary cpus with D-cache off */ flush_xen_dcache(smp_up_cpu); - asm volatile("isb; sev"); + isb(); + sev(); while ( !cpu_online(cpu) ) { -- 1.7.2.5
Ian Campbell
2013-Feb-14 16:47 UTC
[PATCH V2 05/46] xen: arm64: initial build + config changes, start of day code
Signed-off-by: Ian Campbell <ian.campbell@citrix.com> --- v2: - Add PSR_MODE definitions for 64-bit to arch-arm.h and use instead of defining in head.S - Nuke hard tabs in head.S and mode_switch.S with expand(1) --- Config.mk | 2 +- config/arm64.mk | 12 ++ xen/arch/arm/Makefile | 1 + xen/arch/arm/Rules.mk | 6 + xen/arch/arm/arm64/Makefile | 1 + xen/arch/arm/arm64/head.S | 394 ++++++++++++++++++++++++++++++++++++++ xen/arch/arm/arm64/mode_switch.S | 83 ++++++++ xen/arch/arm/xen.lds.S | 8 +- xen/include/asm-arm/page.h | 1 + xen/include/public/arch-arm.h | 14 ++ xen/include/public/hvm/save.h | 2 +- xen/include/public/xen.h | 2 +- xen/include/xen/libelf.h | 2 +- 13 files changed, 523 insertions(+), 5 deletions(-) create mode 100644 config/arm64.mk create mode 100644 xen/arch/arm/arm64/Makefile create mode 100644 xen/arch/arm/arm64/head.S create mode 100644 xen/arch/arm/arm64/mode_switch.S diff --git a/Config.mk b/Config.mk index 64541c8..ea64925 100644 --- a/Config.mk +++ b/Config.mk @@ -15,7 +15,7 @@ debug_symbols ?= $(debug) XEN_COMPILE_ARCH ?= $(shell uname -m | sed -e s/i.86/x86_32/ \ -e s/i86pc/x86_32/ -e s/amd64/x86_64/ \ - -e s/armv7.*/arm32/) + -e s/armv7.*/arm32/ -e s/armv8.*/arm64/) XEN_TARGET_ARCH ?= $(XEN_COMPILE_ARCH) XEN_OS ?= $(shell uname -s) diff --git a/config/arm64.mk b/config/arm64.mk new file mode 100644 index 0000000..b2457eb --- /dev/null +++ b/config/arm64.mk @@ -0,0 +1,12 @@ +CONFIG_ARM := y +CONFIG_ARM_64 := y +CONFIG_ARM_$(XEN_OS) := y + +CFLAGS += #-marm -march= -mcpu= etc + +HAS_PL011 := y + +# Use only if calling $(LD) directly. 
+LDFLAGS_DIRECT += -maarch64elf + +CONFIG_LOAD_ADDRESS ?= 0x80000000 diff --git a/xen/arch/arm/Makefile b/xen/arch/arm/Makefile index f2822f2..7ff67c7 100644 --- a/xen/arch/arm/Makefile +++ b/xen/arch/arm/Makefile @@ -1,4 +1,5 @@ subdir-$(arm32) += arm32 +subdir-$(arm64) += arm64 obj-y += early_printk.o obj-y += domain.o diff --git a/xen/arch/arm/Rules.mk b/xen/arch/arm/Rules.mk index 5b5768a..29b605d 100644 --- a/xen/arch/arm/Rules.mk +++ b/xen/arch/arm/Rules.mk @@ -26,6 +26,12 @@ arm32 := y arm64 := n endif +ifeq ($(TARGET_SUBARCH),arm64) +CFLAGS += -mcpu=generic +arm32 := n +arm64 := y +endif + ifneq ($(call cc-option,$(CC),-fvisibility=hidden,n),n) CFLAGS += -DGCC_HAS_VISIBILITY_ATTRIBUTE endif diff --git a/xen/arch/arm/arm64/Makefile b/xen/arch/arm/arm64/Makefile new file mode 100644 index 0000000..dffbeb1 --- /dev/null +++ b/xen/arch/arm/arm64/Makefile @@ -0,0 +1 @@ +obj-y += mode_switch.o diff --git a/xen/arch/arm/arm64/head.S b/xen/arch/arm/arm64/head.S new file mode 100644 index 0000000..847043b --- /dev/null +++ b/xen/arch/arm/arm64/head.S @@ -0,0 +1,394 @@ +/* + * xen/arch/arm/head.S + * + * Start-of-day code for an ARMv8. + * + * Ian Campbell <ian.campbell@citrix.com> + * Copyright (c) 2012 Citrix Systems. + * + * Based on ARMv7-A head.S by + * Tim Deegan <tim@xen.org> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#include <asm/config.h> +#include <asm/page.h> +#include <asm/asm_defns.h> + +#define PT_PT 0xe7f /* nG=1 AF=1 SH=10 AP=01 NS=1 ATTR=111 T=1 P=1 */ +#define PT_MEM 0xe7d /* nG=1 AF=1 SH=10 AP=01 NS=1 ATTR=111 T=0 P=1 */ +#define PT_DEV 0xe71 /* nG=1 AF=1 SH=10 AP=01 NS=1 ATTR=100 T=0 P=1 */ +#define PT_DEV_L3 0xe73 /* nG=1 AF=1 SH=10 AP=01 NS=1 ATTR=100 T=1 P=1 */ + +/* Macro to print a string to the UART, if there is one. + * Clobbers r0-r3. */ +#ifdef EARLY_UART_ADDRESS +#define PRINT(_s) \ + adr x0, 98f ; \ + bl puts ; \ + b 99f ; \ +98: .asciz _s ; \ + .align 2 ; \ +99: +#else +#define PRINT(s) +#endif + + /*.aarch64*/ + + /* + * Kernel startup entry point. + * --------------------------- + * + * The requirements are: + * MMU = off, D-cache = off, I-cache = on or off, + * x0 = physical address to the FDT blob. + * + * This must be the very first address in the loaded image. + * It should be linked at XEN_VIRT_START, and loaded at any + * 2MB-aligned address. All of text+data+bss must fit in 2MB, + * or the initial pagetable code below will need adjustment. + */ + + .global start +start: + /* + * DO NOT MODIFY. Image header expected by Linux boot-loaders. + */ + b real_start /* branch to kernel start, magic */ + .long 0 /* reserved */ + .quad 0 /* Image load offset from start of RAM */ + .quad 0 /* reserved */ + .quad 0 /* reserved */ + +real_start: + msr DAIFSet, 0xf /* Disable all interrupts */ + + /* Save the bootloader arguments in less-clobberable registers */ + mov x21, x0 /* x21 := DTB, physical address */ + + /* Find out where we are */ + ldr x0, =start + adr x19, start /* x19 := paddr (start) */ + sub x20, x19, x0 /* x20 := phys-offset */ + + /* Using the DTB in the .dtb section? */ +#ifdef CONFIG_DTB_FILE + ldr x21, =_sdtb + add x21, x21, x20 /* x21 := paddr(DTB) */ +#endif + + /* Are we the boot CPU? */ + mov x22, #0 /* x22 := CPU ID */ + mrs x0, mpidr_el1 + tbz x0, 31, boot_cpu /* Multiprocessor extension supported? 
*/ + tbnz x0, 30, boot_cpu /* Uniprocessor system? */ + + mov x13, #(0xff << 24) + bics x22, x0, x13 /* Mask out flags to get CPU ID */ + b.eq boot_cpu /* If we''re CPU 0, boot now */ + + /* Non-boot CPUs wait here to be woken up one at a time. */ +1: dsb sy + ldr x0, =smp_up_cpu /* VA of gate */ + add x0, x0, x20 /* PA of gate */ + ldr x1, [x0] /* Which CPU is being booted? */ + cmp x1, x22 /* Is it us? */ + b.eq 2f + wfe + b 1b +2: + +boot_cpu: +#ifdef EARLY_UART_ADDRESS + ldr x23, =EARLY_UART_ADDRESS /* x23 := UART base address */ + cbnz x22, 1f + bl init_uart /* CPU 0 sets up the UART too */ +1: PRINT("- CPU ") + mov x0, x22 + bl putn + PRINT(" booting -\r\n") +#endif + + PRINT("- Current EL ") + mrs x0, CurrentEL + bl putn + PRINT(" -\r\n") + + /* Are we in EL3 */ + mrs x0, CurrentEL + cmp x0, #PSR_MODE_EL3t + ccmp x0, #PSR_MODE_EL3h, #0x4, ne + b.eq 1f /* Yes */ + + /* Are we in EL2 */ + cmp x0, #PSR_MODE_EL2t + ccmp x0, #PSR_MODE_EL2h, #0x4, ne + b.eq 2f /* Yes */ + + /* Otherwise, it must have been EL0 or EL1 */ + PRINT("- CPU is not in EL3 or EL2 -\r\n") + b fail + +1: PRINT("- Started in EL3 -\r\n- Entering EL2 -\r\n") + ldr x1, =enter_el2_mode /* VA of function */ + add x1, x1, x20 /* PA of function */ + adr x30, hyp /* Set return address for call */ + br x1 /* Call function */ + +2: PRINT("- Started in Hyp mode -\r\n") + +hyp: + /* Zero BSS On the boot CPU to avoid nasty surprises */ + cbnz x22, skip_bss + + PRINT("- Zero BSS -\r\n") + ldr x0, =__bss_start /* Load start & end of bss */ + ldr x1, =__bss_end + add x0, x0, x20 /* Apply physical offset */ + add x1, x1, x20 + +1: str xzr, [x0], #8 + cmp x0, x1 + b.lo 1b + +skip_bss: + + PRINT("- Setting up control registers -\r\n") + + /* Set up memory attribute type tables */ + ldr x0, =MAIRVAL + msr mair_el2, x0 + + /* Set up the HTCR: + * PASize -- 4G + * Top byte is used + * PT walks use Outer-Shareable accesses, + * PT walks are write-back, no-write-allocate in both cache levels, + * Full 64-bit address 
space goes through this table. */ + ldr x0, =0x80802500 + msr tcr_el2, x0 + + /* Set up the HSCTLR: + * Exceptions in LE ARM, + * Low-latency IRQs disabled, + * Write-implies-XN disabled (for now), + * D-cache disabled (for now), + * I-cache enabled, + * Alignment checking enabled, + * MMU translation disabled (for now). */ + ldr x0, =(HSCTLR_BASE|SCTLR_A) + msr SCTLR_EL2, x0 + + /* Write Xen''s PT''s paddr into the HTTBR */ + ldr x4, =xen_pgtable + add x4, x4, x20 /* x4 := paddr (xen_pagetable) */ + msr TTBR0_EL2, x4 + + /* Non-boot CPUs don''t need to rebuild the pagetable */ + cbnz x22, pt_ready + + ldr x1, =xen_first + add x1, x1, x20 /* x1 := paddr (xen_first) */ + mov x3, #PT_PT /* x2 := table map of xen_first */ + orr x2, x1, x3 /* (+ rights for linear PT) */ + str x2, [x4, #0] /* Map it in slot 0 */ + + mov x4, x1 /* Next level into xen_first */ + + /* console fixmap */ +#ifdef EARLY_UART_ADDRESS + ldr x1, =xen_fixmap + add x1, x1, x20 /* x1 := paddr (xen_fixmap) */ + lsr x2, x23, #12 + lsl x2, x2, #12 /* 4K aligned paddr of UART */ + mov x3, #PT_DEV_L3 + orr x2, x2, x3 /* x2 := 4K dev map including UART */ + str x2, [x1, #(FIXMAP_CONSOLE*8)] /* Map it in the first fixmap''s slot */ +#endif + + /* Build the baseline idle pagetable''s first-level entries */ + ldr x1, =xen_second + add x1, x1, x20 /* x1 := paddr (xen_second) */ + mov x3, #PT_PT /* x2 := table map of xen_second */ + orr x2, x1, x3 /* (+ rights for linear PT) */ + str x2, [x4, #0] /* Map it in slot 0 */ + add x2, x2, #0x1000 + str x2, [x4, #8] /* Map 2nd page in slot 1 */ + add x2, x2, #0x1000 + str x2, [x4, #16] /* Map 3rd page in slot 2 */ + add x2, x2, #0x1000 + str x2, [x4, #24] /* Map 4th page in slot 3 */ + + /* Now set up the second-level entries */ + mov x3, #PT_MEM + orr x2, x19, x3 /* x2 := 2MB normal map of Xen */ + orr x4, xzr, x19, lsr #18 + str x2, [x1, x4] /* Map Xen there */ + ldr x4, =start + lsr x4, x4, #18 /* Slot for vaddr(start) */ + str x2, [x1, x4] /* Map Xen there too */ 
+ + /* xen_fixmap pagetable */ + ldr x2, =xen_fixmap + add x2, x2, x20 /* x2 := paddr (xen_fixmap) */ + mov x3, #PT_PT + orr x2, x2, x3 /* x2 := table map of xen_fixmap */ + add x4, x4, #8 + str x2, [x1, x4] /* Map it in the fixmap''s slot */ + + lsr x2, x21, #21 + lsl x2, x2, #21 /* 2MB-aligned paddr of DTB */ + mov x3, #PT_MEM /* x2 := 2MB RAM incl. DTB */ + orr x2, x2, x3 + add x4, x4, #8 + str x2, [x1, x4] /* Map it in the early boot slot */ + +pt_ready: + PRINT("- Turning on paging -\r\n") + + ldr x1, =paging /* Explicit vaddr, not RIP-relative */ + mrs x0, SCTLR_EL2 + orr x0, x0, #SCTLR_M /* Enable MMU */ + orr x0, x0, #SCTLR_C /* Enable D-cache */ + dsb sy /* Flush PTE writes and finish reads */ + msr SCTLR_EL2, x0 /* now paging is enabled */ + isb /* Now, flush the icache */ + br x1 /* Get a proper vaddr into PC */ +paging: + +#ifdef EARLY_UART_ADDRESS + /* Use a virtual address to access the UART. */ + ldr x23, =FIXMAP_ADDR(FIXMAP_CONSOLE) +#endif + + PRINT("- Ready -\r\n") + + /* The boot CPU should go straight into C now */ + cbz x22, launch + + /* Non-boot CPUs need to move on to the relocated pagetables */ + //mov x0, #0 + ldr x4, =boot_ttbr /* VA of TTBR0_EL2 stashed by CPU 0 */ + add x4, x4, x20 /* PA of it */ + ldr x4, [x4] /* Actual value */ + dsb sy + msr TTBR0_EL2, x4 + dsb sy + isb + tlbi alle2 + dsb sy /* Ensure completion of TLB flush */ + isb + + /* Non-boot CPUs report that they''ve got this far */ + ldr x0, =ready_cpus +1: ldaxr x1, [x0] /* { read # of ready CPUs } */ + add x1, x1, #1 /* Atomically { ++ } */ + stlxr w2, x1, [x0] /* { writeback } */ + cbnz w2, 1b + dsb sy + dc cvac, x0 /* Flush D-Cache */ + dsb sy + + /* Here, the non-boot CPUs must wait again -- they''re now running on + * the boot CPU''s pagetables so it''s safe for the boot CPU to + * overwrite the non-relocated copy of Xen. Once it''s done that, + * and brought up the memory allocator, non-boot CPUs can get their + * own stacks and enter C. 
*/ +1: wfe + dsb sy + ldr x0, =smp_up_cpu + ldr x1, [x0] /* Which CPU is being booted? */ + cmp x1, x12 /* Is it us? */ + b.ne 1b + +launch: + ldr x0, =init_stack /* Find the boot-time stack */ + ldr x0, [x0] + add x0, x0, #STACK_SIZE /* (which grows down from the top). */ + sub x0, x0, #CPUINFO_sizeof /* Make room for CPU save record */ + mov sp, x0 + + mov x0, x20 /* Marshal args: - phys_offset */ + mov x1, x21 /* - FDT */ + mov x2, x22 /* - CPU ID */ + cbz x22, start_xen /* and disappear into the land of C */ + b start_secondary /* (to the appropriate entry point) */ + +/* Fail-stop + * r0: string explaining why */ +fail: PRINT("- Boot failed -\r\n") +1: wfe + b 1b + +#ifdef EARLY_UART_ADDRESS + +/* Bring up the UART. Specific to the PL011 UART. + * Clobbers r0-r2 */ +init_uart: + mov x1, #0x0 + strh w1, [x23, #0x24] /* -> UARTIBRD (Baud divisor fraction) */ + mov x1, #0x4 /* 7.3728MHz / 0x4 == 16 * 115200 */ + strh w1, [x23, #0x24] /* -> UARTIBRD (Baud divisor integer) */ + mov x1, #0x60 /* 8n1 */ + strh w1, [x23, #0x24] /* -> UARTLCR_H (Line control) */ + ldr x1, =0x00000301 /* RXE | TXE | UARTEN */ + strh w1, [x23, #0x30] /* -> UARTCR (Control Register) */ + adr x0, 1f + b puts +1: .asciz "- UART enabled -\r\n" + .align 4 + +/* Print early debug messages. Specific to the PL011 UART. + * r0: Nul-terminated string to print. + * Clobbers r0-r2 */ +puts: + ldrh w2, [x23, #0x18] /* <- UARTFR (Flag register) */ + tst w2, #0x8 /* Check BUSY bit */ + b.ne puts /* Wait for the UART to be ready */ + ldrb w2, [x0], #1 /* Load next char */ + cbz w2, 1f /* Exit on nul */ + str w2, [x23] /* -> UARTDR (Data Register) */ + b puts +1: + ret + +/* Print a 32-bit number in hex. Specific to the PL011 UART. + * r0: Number to print. 
+ * clobbers r0-r3 */ +putn: + adr x1, hex + mov x3, #8 +1: ldrh w2, [x23, #0x18] /* <- UARTFR (Flag register) */ + tst w2, #0x8 /* Check BUSY bit */ + b.ne 1b /* Wait for the UART to be ready */ + and x2, x0, #0xf0000000 /* Mask off the top nybble */ + lsr x2, x2, #28 + ldrb w2, [x1, x2] /* Convert to a char */ + strb w2, [x23] /* -> UARTDR (Data Register) */ + lsl x0, x0, #4 /* Roll it through one nybble at a time */ + subs x3, x3, #1 + b.ne 1b + ret + +hex: .ascii "0123456789abcdef" + .align 2 + +#else /* EARLY_UART_ADDRESS */ + +init_uart: +.global early_puts +early_puts: +puts: +putn: mov pc, lr + +#endif /* EARLY_UART_ADDRESS */ diff --git a/xen/arch/arm/arm64/mode_switch.S b/xen/arch/arm/arm64/mode_switch.S new file mode 100644 index 0000000..d1f66e5 --- /dev/null +++ b/xen/arch/arm/arm64/mode_switch.S @@ -0,0 +1,83 @@ +/* + * xen/arch/arm/arm64/mode_switch.S + * + * Start-of day code to take a CPU from EL3 to EL2. Largely taken from + bootwrapper. + * + * Ian Campbell <ian.campbell@citrix.com> + * Copyright (c) 2012 Citrix Systems. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include <asm/config.h> +#include <asm/page.h> +#include <asm/asm_defns.h> + +/* Get up a CPU into EL2. Clobbers x0-x3. + * + * Expects x22 == CPU number + * Expects x30 == EL2 entry point + * + * This code is specific to the VE model, and not intended to be used + * on production systems. As such it''s a bit hackier than the main + * boot code in head.S. 
In future it will be replaced by better + * integration with the bootloader/firmware so that Xen always starts + * at EL2. + */ + +.globl enter_el2_mode +enter_el2_mode: + mov x0, #0x30 // RES1 + orr x0, x0, #(1 << 0) // Non-secure EL1 + orr x0, x0, #(1 << 8) // HVC enable + orr x0, x0, #(1 << 10) // 64-bit EL2 + msr scr_el3, x0 + + msr cptr_el3, xzr // Disable copro. traps to EL3 + + ldr x0, =0x01800000 // 24Mhz + msr cntfrq_el0, x0 + + /* + * Check for the primary CPU to avoid a race on the distributor + * registers. + */ + cbnz x22, 1f + + ldr x1, =(GIC_BASE_ADDRESS+GIC_DR_OFFSET) // GICD_CTLR + mov w0, #3 // EnableGrp0 | EnableGrp1 + str w0, [x1] + +1: ldr x1, =(GIC_BASE_ADDRESS+GIC_DR_OFFSET+0x80) // GICD_IGROUPR + mov w0, #~0 // Grp1 interrupts + str w0, [x1], #4 + b.ne 2f // Only local interrupts for secondary CPUs + str w0, [x1], #4 + str w0, [x1], #4 + +2: ldr x1, =(GIC_BASE_ADDRESS+GIC_CR_OFFSET) // GICC_CTLR + ldr w0, [x1] + mov w0, #3 // EnableGrp0 | EnableGrp1 + str w0, [x1] + + mov w0, #1 << 7 // allow NS access to GICC_PMR + str w0, [x1, #4] // GICC_PMR + + msr sctlr_el2, xzr + + /* + * Prepare the switch to the EL2_SP1 mode from EL3 + */ + msr elr_el3, x30 // Return to desired function + mov x1, #0x3c9 // EL2_SP1 | D | A | I | F + msr spsr_el3, x1 + eret diff --git a/xen/arch/arm/xen.lds.S b/xen/arch/arm/xen.lds.S index 410d7db..b1f0a78 100644 --- a/xen/arch/arm/xen.lds.S +++ b/xen/arch/arm/xen.lds.S @@ -11,7 +11,13 @@ ENTRY(start) -OUTPUT_ARCH(arm) +#if defined(__arm__) +#define FORMAT arm +#elif defined(__aarch64__) +#define FORMAT aarch64 +#endif + +OUTPUT_ARCH(FORMAT) PHDRS { diff --git a/xen/include/asm-arm/page.h b/xen/include/asm-arm/page.h index 9acd0af..e0a636f 100644 --- a/xen/include/asm-arm/page.h +++ b/xen/include/asm-arm/page.h @@ -38,6 +38,7 @@ */ #define MAIR0VAL 0xeeaa4400 #define MAIR1VAL 0xff000004 +#define MAIRVAL (MAIR0VAL|MAIR1VAL<<32) /* * Attribute Indexes. 
diff --git a/xen/include/public/arch-arm.h b/xen/include/public/arch-arm.h index 8dd9062..dc12524 100644 --- a/xen/include/public/arch-arm.h +++ b/xen/include/public/arch-arm.h @@ -174,6 +174,8 @@ typedef uint64_t xen_callback_t; /* 0-4: Mode */ #define PSR_MODE_MASK 0x1f + +/* 32 bit modes */ #define PSR_MODE_USR 0x10 #define PSR_MODE_FIQ 0x11 #define PSR_MODE_IRQ 0x12 @@ -184,6 +186,18 @@ typedef uint64_t xen_callback_t; #define PSR_MODE_UND 0x1b #define PSR_MODE_SYS 0x1f +/* 64 bit modes */ +#ifdef CONFIG_ARM_64 +#define PSR_MODE_BIT 0x10 /* Set iff AArch32 */ +#define PSR_MODE_EL3h 0x0d +#define PSR_MODE_EL3t 0x0c +#define PSR_MODE_EL2h 0x09 +#define PSR_MODE_EL2t 0x08 +#define PSR_MODE_EL1h 0x05 +#define PSR_MODE_EL1t 0x04 +#define PSR_MODE_EL0t 0x00 +#endif + #define PSR_THUMB (1<<5) /* Thumb Mode enable */ #define PSR_FIQ_MASK (1<<6) /* Fast Interrupt mask */ #define PSR_IRQ_MASK (1<<7) /* Interrupt mask */ diff --git a/xen/include/public/hvm/save.h b/xen/include/public/hvm/save.h index 5538d8e..cc8b5fd 100644 --- a/xen/include/public/hvm/save.h +++ b/xen/include/public/hvm/save.h @@ -102,7 +102,7 @@ DECLARE_HVM_SAVE_TYPE(END, 0, struct hvm_save_end); #if defined(__i386__) || defined(__x86_64__) #include "../arch-x86/hvm/save.h" -#elif defined(__arm__) +#elif defined(__arm__) || defined(__aarch64__) #include "../arch-arm/hvm/save.h" #else #error "unsupported architecture" diff --git a/xen/include/public/xen.h b/xen/include/public/xen.h index 846f446..a1927c0 100644 --- a/xen/include/public/xen.h +++ b/xen/include/public/xen.h @@ -31,7 +31,7 @@ #if defined(__i386__) || defined(__x86_64__) #include "arch-x86/xen.h" -#elif defined(__arm__) +#elif defined(__arm__) || defined (__aarch64__) #include "arch-arm.h" #else #error "Unsupported architecture" diff --git a/xen/include/xen/libelf.h b/xen/include/xen/libelf.h index e8f6508..218bb18 100644 --- a/xen/include/xen/libelf.h +++ b/xen/include/xen/libelf.h @@ -23,7 +23,7 @@ #ifndef __XEN_LIBELF_H__ #define 
__XEN_LIBELF_H__ -#if defined(__i386__) || defined(__x86_64__) || defined(__arm__) +#if defined(__i386__) || defined(__x86_64__) || defined(__arm__) || defined(__aarch64__) #define XEN_ELF_LITTLE_ENDIAN #else #error define architectural endianness -- 1.7.2.5
Ian Campbell
2013-Feb-14 16:47 UTC
[PATCH V2 06/46] xen: arm64: basic config and types headers
The 64-bit bitops are taken from the Linux asm-generic implementations. They should be replaced with optimised versions from the Linux arm64 port when they become available. Signed-off-by: Ian Campbell <ian.campbell@citrix.com> Acked-by: Tim Deegan <tim@xen.org> --- v2: mention bitops heritage. --- xen/arch/arm/arm64/Makefile | 2 + xen/arch/arm/arm64/lib/Makefile | 1 + xen/arch/arm/arm64/lib/bitops.c | 22 +++ xen/arch/arm/arm64/lib/find_next_bit.c | 284 ++++++++++++++++++++++++++++++++ xen/include/asm-arm/arm32/bitops.h | 54 ++++++ xen/include/asm-arm/arm64/bitops.h | 283 +++++++++++++++++++++++++++++++ xen/include/asm-arm/bitops.h | 65 ++------ xen/include/asm-arm/config.h | 15 ++ xen/include/asm-arm/types.h | 17 ++- 9 files changed, 686 insertions(+), 57 deletions(-) create mode 100644 xen/arch/arm/arm64/lib/Makefile create mode 100644 xen/arch/arm/arm64/lib/bitops.c create mode 100644 xen/arch/arm/arm64/lib/find_next_bit.c create mode 100644 xen/include/asm-arm/arm32/bitops.h create mode 100644 xen/include/asm-arm/arm64/bitops.h diff --git a/xen/arch/arm/arm64/Makefile b/xen/arch/arm/arm64/Makefile index dffbeb1..c447eaa 100644 --- a/xen/arch/arm/arm64/Makefile +++ b/xen/arch/arm/arm64/Makefile @@ -1 +1,3 @@ +subdir-y += lib + obj-y += mode_switch.o diff --git a/xen/arch/arm/arm64/lib/Makefile b/xen/arch/arm/arm64/lib/Makefile new file mode 100644 index 0000000..32c02c4 --- /dev/null +++ b/xen/arch/arm/arm64/lib/Makefile @@ -0,0 +1 @@ +obj-y += bitops.o find_next_bit.o diff --git a/xen/arch/arm/arm64/lib/bitops.c b/xen/arch/arm/arm64/lib/bitops.c new file mode 100644 index 0000000..02d8d78 --- /dev/null +++ b/xen/arch/arm/arm64/lib/bitops.c @@ -0,0 +1,22 @@ +/* + * Copyright (C) 2012 ARM Limited + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see <http://www.gnu.org/licenses/>. + */ + +#include <xen/spinlock.h> +#include <xen/bitops.h> + +spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] /*__lock_aligned*/ = { + [0 ... (ATOMIC_HASH_SIZE-1)] = SPIN_LOCK_UNLOCKED +}; diff --git a/xen/arch/arm/arm64/lib/find_next_bit.c b/xen/arch/arm/arm64/lib/find_next_bit.c new file mode 100644 index 0000000..aea69c2 --- /dev/null +++ b/xen/arch/arm/arm64/lib/find_next_bit.c @@ -0,0 +1,284 @@ +/* find_next_bit.c: fallback find next bit implementation + * + * Copyright (C) 2004 Red Hat, Inc. All Rights Reserved. + * Written by David Howells (dhowells@redhat.com) + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + */ +#include <xen/config.h> +#include <xen/bitops.h> +#include <asm/types.h> +#include <asm/byteorder.h> + +#define BITOP_WORD(nr) ((nr) / BITS_PER_LONG) + +#ifndef find_next_bit +/* + * Find the next set bit in a memory region. 
+ */ +unsigned long find_next_bit(const unsigned long *addr, unsigned long size, + unsigned long offset) +{ + const unsigned long *p = addr + BITOP_WORD(offset); + unsigned long result = offset & ~(BITS_PER_LONG-1); + unsigned long tmp; + + if (offset >= size) + return size; + size -= result; + offset %= BITS_PER_LONG; + if (offset) { + tmp = *(p++); + tmp &= (~0UL << offset); + if (size < BITS_PER_LONG) + goto found_first; + if (tmp) + goto found_middle; + size -= BITS_PER_LONG; + result += BITS_PER_LONG; + } + while (size & ~(BITS_PER_LONG-1)) { + if ((tmp = *(p++))) + goto found_middle; + result += BITS_PER_LONG; + size -= BITS_PER_LONG; + } + if (!size) + return result; + tmp = *p; + +found_first: + tmp &= (~0UL >> (BITS_PER_LONG - size)); + if (tmp == 0UL) /* Are any bits set? */ + return result + size; /* Nope. */ +found_middle: + return result + __ffs(tmp); +} +EXPORT_SYMBOL(find_next_bit); +#endif + +#ifndef find_next_zero_bit +/* + * This implementation of find_{first,next}_zero_bit was stolen from + * Linus'' asm-alpha/bitops.h. + */ +unsigned long find_next_zero_bit(const unsigned long *addr, unsigned long size, + unsigned long offset) +{ + const unsigned long *p = addr + BITOP_WORD(offset); + unsigned long result = offset & ~(BITS_PER_LONG-1); + unsigned long tmp; + + if (offset >= size) + return size; + size -= result; + offset %= BITS_PER_LONG; + if (offset) { + tmp = *(p++); + tmp |= ~0UL >> (BITS_PER_LONG - offset); + if (size < BITS_PER_LONG) + goto found_first; + if (~tmp) + goto found_middle; + size -= BITS_PER_LONG; + result += BITS_PER_LONG; + } + while (size & ~(BITS_PER_LONG-1)) { + if (~(tmp = *(p++))) + goto found_middle; + result += BITS_PER_LONG; + size -= BITS_PER_LONG; + } + if (!size) + return result; + tmp = *p; + +found_first: + tmp |= ~0UL << size; + if (tmp == ~0UL) /* Are any bits zero? */ + return result + size; /* Nope. 
*/ +found_middle: + return result + ffz(tmp); +} +EXPORT_SYMBOL(find_next_zero_bit); +#endif + +#ifndef find_first_bit +/* + * Find the first set bit in a memory region. + */ +unsigned long find_first_bit(const unsigned long *addr, unsigned long size) +{ + const unsigned long *p = addr; + unsigned long result = 0; + unsigned long tmp; + + while (size & ~(BITS_PER_LONG-1)) { + if ((tmp = *(p++))) + goto found; + result += BITS_PER_LONG; + size -= BITS_PER_LONG; + } + if (!size) + return result; + + tmp = (*p) & (~0UL >> (BITS_PER_LONG - size)); + if (tmp == 0UL) /* Are any bits set? */ + return result + size; /* Nope. */ +found: + return result + __ffs(tmp); +} +EXPORT_SYMBOL(find_first_bit); +#endif + +#ifndef find_first_zero_bit +/* + * Find the first cleared bit in a memory region. + */ +unsigned long find_first_zero_bit(const unsigned long *addr, unsigned long size) +{ + const unsigned long *p = addr; + unsigned long result = 0; + unsigned long tmp; + + while (size & ~(BITS_PER_LONG-1)) { + if (~(tmp = *(p++))) + goto found; + result += BITS_PER_LONG; + size -= BITS_PER_LONG; + } + if (!size) + return result; + + tmp = (*p) | (~0UL << size); + if (tmp == ~0UL) /* Are any bits zero? */ + return result + size; /* Nope. 
*/ +found: + return result + ffz(tmp); +} +EXPORT_SYMBOL(find_first_zero_bit); +#endif + +#ifdef __BIG_ENDIAN + +/* include/linux/byteorder does not support "unsigned long" type */ +static inline unsigned long ext2_swabp(const unsigned long * x) +{ +#if BITS_PER_LONG == 64 + return (unsigned long) __swab64p((u64 *) x); +#elif BITS_PER_LONG == 32 + return (unsigned long) __swab32p((u32 *) x); +#else +#error BITS_PER_LONG not defined +#endif +} + +/* include/linux/byteorder doesn''t support "unsigned long" type */ +static inline unsigned long ext2_swab(const unsigned long y) +{ +#if BITS_PER_LONG == 64 + return (unsigned long) __swab64((u64) y); +#elif BITS_PER_LONG == 32 + return (unsigned long) __swab32((u32) y); +#else +#error BITS_PER_LONG not defined +#endif +} + +#ifndef find_next_zero_bit_le +unsigned long find_next_zero_bit_le(const void *addr, unsigned + long size, unsigned long offset) +{ + const unsigned long *p = addr; + unsigned long result = offset & ~(BITS_PER_LONG - 1); + unsigned long tmp; + + if (offset >= size) + return size; + p += BITOP_WORD(offset); + size -= result; + offset &= (BITS_PER_LONG - 1UL); + if (offset) { + tmp = ext2_swabp(p++); + tmp |= (~0UL >> (BITS_PER_LONG - offset)); + if (size < BITS_PER_LONG) + goto found_first; + if (~tmp) + goto found_middle; + size -= BITS_PER_LONG; + result += BITS_PER_LONG; + } + + while (size & ~(BITS_PER_LONG - 1)) { + if (~(tmp = *(p++))) + goto found_middle_swap; + result += BITS_PER_LONG; + size -= BITS_PER_LONG; + } + if (!size) + return result; + tmp = ext2_swabp(p); +found_first: + tmp |= ~0UL << size; + if (tmp == ~0UL) /* Are any bits zero? */ + return result + size; /* Nope. 
Skip ffz */ +found_middle: + return result + ffz(tmp); + +found_middle_swap: + return result + ffz(ext2_swab(tmp)); +} +EXPORT_SYMBOL(find_next_zero_bit_le); +#endif + +#ifndef find_next_bit_le +unsigned long find_next_bit_le(const void *addr, unsigned + long size, unsigned long offset) +{ + const unsigned long *p = addr; + unsigned long result = offset & ~(BITS_PER_LONG - 1); + unsigned long tmp; + + if (offset >= size) + return size; + p += BITOP_WORD(offset); + size -= result; + offset &= (BITS_PER_LONG - 1UL); + if (offset) { + tmp = ext2_swabp(p++); + tmp &= (~0UL << offset); + if (size < BITS_PER_LONG) + goto found_first; + if (tmp) + goto found_middle; + size -= BITS_PER_LONG; + result += BITS_PER_LONG; + } + + while (size & ~(BITS_PER_LONG - 1)) { + tmp = *(p++); + if (tmp) + goto found_middle_swap; + result += BITS_PER_LONG; + size -= BITS_PER_LONG; + } + if (!size) + return result; + tmp = ext2_swabp(p); +found_first: + tmp &= (~0UL >> (BITS_PER_LONG - size)); + if (tmp == 0UL) /* Are any bits set? */ + return result + size; /* Nope. 
*/ +found_middle: + return result + __ffs(tmp); + +found_middle_swap: + return result + __ffs(ext2_swab(tmp)); +} +EXPORT_SYMBOL(find_next_bit_le); +#endif + +#endif /* __BIG_ENDIAN */ diff --git a/xen/include/asm-arm/arm32/bitops.h b/xen/include/asm-arm/arm32/bitops.h new file mode 100644 index 0000000..0d05258 --- /dev/null +++ b/xen/include/asm-arm/arm32/bitops.h @@ -0,0 +1,54 @@ +#ifndef _ARM_ARM32_BITOPS_H +#define _ARM_ARM32_BITOPS_H + +extern void _set_bit(int nr, volatile void * p); +extern void _clear_bit(int nr, volatile void * p); +extern void _change_bit(int nr, volatile void * p); +extern int _test_and_set_bit(int nr, volatile void * p); +extern int _test_and_clear_bit(int nr, volatile void * p); +extern int _test_and_change_bit(int nr, volatile void * p); + +#define set_bit(n,p) _set_bit(n,p) +#define clear_bit(n,p) _clear_bit(n,p) +#define change_bit(n,p) _change_bit(n,p) +#define test_and_set_bit(n,p) _test_and_set_bit(n,p) +#define test_and_clear_bit(n,p) _test_and_clear_bit(n,p) +#define test_and_change_bit(n,p) _test_and_change_bit(n,p) + +/* + * Little endian assembly bitops. nr = 0 -> byte 0 bit 0. + */ +extern int _find_first_zero_bit_le(const void * p, unsigned size); +extern int _find_next_zero_bit_le(const void * p, int size, int offset); +extern int _find_first_bit_le(const unsigned long *p, unsigned size); +extern int _find_next_bit_le(const unsigned long *p, int size, int offset); + +/* + * Big endian assembly bitops. nr = 0 -> byte 3 bit 0. + */ +extern int _find_first_zero_bit_be(const void * p, unsigned size); +extern int _find_next_zero_bit_be(const void * p, int size, int offset); +extern int _find_first_bit_be(const unsigned long *p, unsigned size); +extern int _find_next_bit_be(const unsigned long *p, int size, int offset); + +#ifndef __ARMEB__ +/* + * These are the little endian, atomic definitions. 
+ */ +#define find_first_zero_bit(p,sz) _find_first_zero_bit_le(p,sz) +#define find_next_zero_bit(p,sz,off) _find_next_zero_bit_le(p,sz,off) +#define find_first_bit(p,sz) _find_first_bit_le(p,sz) +#define find_next_bit(p,sz,off) _find_next_bit_le(p,sz,off) + +#else +/* + * These are the big endian, atomic definitions. + */ +#define find_first_zero_bit(p,sz) _find_first_zero_bit_be(p,sz) +#define find_next_zero_bit(p,sz,off) _find_next_zero_bit_be(p,sz,off) +#define find_first_bit(p,sz) _find_first_bit_be(p,sz) +#define find_next_bit(p,sz,off) _find_next_bit_be(p,sz,off) + +#endif + +#endif /* _ARM_ARM32_BITOPS_H */ diff --git a/xen/include/asm-arm/arm64/bitops.h b/xen/include/asm-arm/arm64/bitops.h new file mode 100644 index 0000000..847d65c --- /dev/null +++ b/xen/include/asm-arm/arm64/bitops.h @@ -0,0 +1,283 @@ +#ifndef _ARM_ARM64_BITOPS_H +#define _ARM_ARM64_BITOPS_H + +/* Generic bitop support. Based on linux/include/asm-generic/bitops/atomic.h */ + +#include <xen/spinlock.h> +#include <xen/cache.h> /* we use L1_CACHE_BYTES */ + +/* Use an array of spinlocks for our atomic_ts. + * Hash function to index into a different SPINLOCK. + * Since "a" is usually an address, use one spinlock per cacheline. 
+ */ +# define ATOMIC_HASH_SIZE 4 +# define ATOMIC_HASH(a) (&(__atomic_hash[ (((unsigned long) a)/L1_CACHE_BYTES) & (ATOMIC_HASH_SIZE-1) ])) + +extern spinlock_t __atomic_hash[ATOMIC_HASH_SIZE]/* __lock_aligned*/; + +#define _atomic_spin_lock_irqsave(l,f) do { \ + spinlock_t *s = ATOMIC_HASH(l); \ + spin_lock_irqsave(s, f);\ +} while(0) + +#define _atomic_spin_unlock_irqrestore(l,f) do {\ + spinlock_t *s = ATOMIC_HASH(l); \ + spin_unlock_irqrestore(s,f); \ +} while(0) + +#define FIXUP(_p, _mask) \ + { \ + unsigned long __p = (unsigned long)_p; \ + if (__p & 0x7) { \ + if (_mask > 0xffffffff) { \ + __p = (__p+32)&~0x7; _mask >>=32; \ + } else { \ + __p &= ~0x7; _mask <<= 32; \ + } \ + if (0)printk("BITOPS: Fixup misaligned ptr %p => %#lx\n", _p, __p); \ + _p = (void *)__p; \ + } \ + } + +/** + * set_bit - Atomically set a bit in memory + * @nr: the bit to set + * @addr: the address to start counting from + * + * This function is atomic and may not be reordered. See __set_bit() + * if you do not require the atomic guarantees. + * + * Note: there are no guarantees that this function will not be reordered + * on non x86 architectures, so if you are writing portable code, + * make sure not to rely on its reordering guarantees. + * + * Note that @nr may be almost arbitrarily large; this function is not + * restricted to acting on a single-word quantity. 
+ */ + +static inline void set_bit(int nr, volatile void *addr) +{ + unsigned long mask = BIT_MASK(nr); + unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr); + unsigned long flags; + + //printk("set_bit: nr %d addr %p mask %#lx p %p lock %p\n", + // nr, addr, mask, p, ATOMIC_HASH(p)); + FIXUP(p, mask); + //printk("set_bit: nr %d addr %p mask %#lx p %p lock %p\n", + // nr, addr, mask, p, ATOMIC_HASH(p)); + //printk("before *p is %#lx\n", *p); + _atomic_spin_lock_irqsave(p, flags); + *p |= mask; + _atomic_spin_unlock_irqrestore(p, flags); + //printk(" after *p is %#lx\n", *p); +} + +/** + * clear_bit - Clears a bit in memory + * @nr: Bit to clear + * @addr: Address to start counting from + * + * clear_bit() is atomic and may not be reordered. However, it does + * not contain a memory barrier, so if it is used for locking purposes, + * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit() + * in order to ensure changes are visible on other processors. + */ +static inline void clear_bit(int nr, volatile void *addr) +{ + unsigned long mask = BIT_MASK(nr); + unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr); + unsigned long flags; + + FIXUP(p, mask); + + _atomic_spin_lock_irqsave(p, flags); + *p &= ~mask; + _atomic_spin_unlock_irqrestore(p, flags); +} + +/** + * change_bit - Toggle a bit in memory + * @nr: Bit to change + * @addr: Address to start counting from + * + * change_bit() is atomic and may not be reordered. It may be + * reordered on other architectures than x86. + * Note that @nr may be almost arbitrarily large; this function is not + * restricted to acting on a single-word quantity. 
+ */ +static inline void change_bit(int nr, volatile void *addr) +{ + unsigned long mask = BIT_MASK(nr); + unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr); + unsigned long flags; + + FIXUP(p, mask); + + _atomic_spin_lock_irqsave(p, flags); + *p ^= mask; + _atomic_spin_unlock_irqrestore(p, flags); +} + +/** + * test_and_set_bit - Set a bit and return its old value + * @nr: Bit to set + * @addr: Address to count from + * + * This operation is atomic and cannot be reordered. + * It may be reordered on other architectures than x86. + * It also implies a memory barrier. + */ +static inline int test_and_set_bit(int nr, volatile void *addr) +{ + unsigned long mask = BIT_MASK(nr); + unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr); + unsigned long old; + unsigned long flags; + + FIXUP(p, mask); + + _atomic_spin_lock_irqsave(p, flags); + old = *p; + *p = old | mask; + _atomic_spin_unlock_irqrestore(p, flags); + + return (old & mask) != 0; +} + +/** + * test_and_clear_bit - Clear a bit and return its old value + * @nr: Bit to clear + * @addr: Address to count from + * + * This operation is atomic and cannot be reordered. + * It can be reorderdered on other architectures other than x86. + * It also implies a memory barrier. + */ +static inline int test_and_clear_bit(int nr, volatile void *addr) +{ + unsigned long mask = BIT_MASK(nr); + unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr); + unsigned long old; + unsigned long flags; + + FIXUP(p, mask); + + _atomic_spin_lock_irqsave(p, flags); + old = *p; + *p = old & ~mask; + _atomic_spin_unlock_irqrestore(p, flags); + + return (old & mask) != 0; +} + +/** + * test_and_change_bit - Change a bit and return its old value + * @nr: Bit to change + * @addr: Address to count from + * + * This operation is atomic and cannot be reordered. + * It also implies a memory barrier. 
+ */ +static inline int test_and_change_bit(int nr, volatile void *addr) +{ + unsigned long mask = BIT_MASK(nr); + unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr); + unsigned long old; + unsigned long flags; + + FIXUP(p, mask); + + _atomic_spin_lock_irqsave(p, flags); + old = *p; + *p = old ^ mask; + _atomic_spin_unlock_irqrestore(p, flags); + + return (old & mask) != 0; +} + +/* Based on linux/include/asm-generic/bitops/builtin-__ffs.h */ +/** + * __ffs - find first bit in word. + * @word: The word to search + * + * Undefined if no bit exists, so code should check against 0 first. + */ +static /*__*/always_inline unsigned long __ffs(unsigned long word) +{ + return __builtin_ctzl(word); +} + +/* Based on linux/include/asm-generic/bitops/ffz.h */ +/* + * ffz - find first zero in word. + * @word: The word to search + * + * Undefined if no zero exists, so code should check against ~0UL first. + */ +#define ffz(x) __ffs(~(x)) + + + +/* Based on linux/include/asm-generic/bitops/find.h */ + +#ifndef find_next_bit +/** + * find_next_bit - find the next set bit in a memory region + * @addr: The address to base the search on + * @offset: The bitnumber to start searching at + * @size: The bitmap size in bits + */ +extern unsigned long find_next_bit(const unsigned long *addr, unsigned long + size, unsigned long offset); +#endif + +#ifndef find_next_zero_bit +/** + * find_next_zero_bit - find the next cleared bit in a memory region + * @addr: The address to base the search on + * @offset: The bitnumber to start searching at + * @size: The bitmap size in bits + */ +extern unsigned long find_next_zero_bit(const unsigned long *addr, unsigned + long size, unsigned long offset); +#endif + +#ifdef CONFIG_GENERIC_FIND_FIRST_BIT + +/** + * find_first_bit - find the first set bit in a memory region + * @addr: The address to start the search at + * @size: The maximum size to search + * + * Returns the bit number of the first set bit. 
+ */ +extern unsigned long find_first_bit(const unsigned long *addr, + unsigned long size); + +/** + * find_first_zero_bit - find the first cleared bit in a memory region + * @addr: The address to start the search at + * @size: The maximum size to search + * + * Returns the bit number of the first cleared bit. + */ +extern unsigned long find_first_zero_bit(const unsigned long *addr, + unsigned long size); +#else /* CONFIG_GENERIC_FIND_FIRST_BIT */ + +#define find_first_bit(addr, size) find_next_bit((addr), (size), 0) +#define find_first_zero_bit(addr, size) find_next_zero_bit((addr), (size), 0) + +#endif /* CONFIG_GENERIC_FIND_FIRST_BIT */ + + +#endif /* _ARM_ARM64_BITOPS_H */ +/* + * Local variables: + * mode: C + * c-set-style: "BSD" + * c-basic-offset: 4 + * indent-tabs-mode: nil + * End: + */ diff --git a/xen/include/asm-arm/bitops.h b/xen/include/asm-arm/bitops.h index 87de5db..563b4be 100644 --- a/xen/include/asm-arm/bitops.h +++ b/xen/include/asm-arm/bitops.h @@ -9,28 +9,14 @@ #ifndef _ARM_BITOPS_H #define _ARM_BITOPS_H -extern void _set_bit(int nr, volatile void * p); -extern void _clear_bit(int nr, volatile void * p); -extern void _change_bit(int nr, volatile void * p); -extern int _test_and_set_bit(int nr, volatile void * p); -extern int _test_and_clear_bit(int nr, volatile void * p); -extern int _test_and_change_bit(int nr, volatile void * p); - -#define set_bit(n,p) _set_bit(n,p) -#define clear_bit(n,p) _clear_bit(n,p) -#define change_bit(n,p) _change_bit(n,p) -#define test_and_set_bit(n,p) _test_and_set_bit(n,p) -#define test_and_clear_bit(n,p) _test_and_clear_bit(n,p) -#define test_and_change_bit(n,p) _test_and_change_bit(n,p) - /* * Non-atomic bit manipulation. * * Implemented using atomics to be interrupt safe. Could alternatively * implement with local interrupt masking. 
*/ -#define __set_bit(n,p) _set_bit(n,p) -#define __clear_bit(n,p) _clear_bit(n,p) +#define __set_bit(n,p) set_bit(n,p) +#define __clear_bit(n,p) clear_bit(n,p) #define BIT(nr) (1UL << (nr)) #define BIT_MASK(nr) (1UL << ((nr) % BITS_PER_LONG)) @@ -40,6 +26,14 @@ extern int _test_and_change_bit(int nr, volatile void * p); #define ADDR (*(volatile long *) addr) #define CONST_ADDR (*(const volatile long *) addr) +#if defined(CONFIG_ARM_32) +# include <asm/arm32/bitops.h> +#elif defined(CONFIG_ARM_64) +# include <asm/arm64/bitops.h> +#else +# error "unknown ARM variant" +#endif + /** * __test_and_set_bit - Set a bit and return its old value * @nr: Bit to set @@ -104,42 +98,6 @@ static inline int test_bit(int nr, const volatile void *addr) return 1UL & (p[BIT_WORD(nr)] >> (nr & (BITS_PER_LONG-1))); } -/* - * Little endian assembly bitops. nr = 0 -> byte 0 bit 0. - */ -extern int _find_first_zero_bit_le(const void * p, unsigned size); -extern int _find_next_zero_bit_le(const void * p, int size, int offset); -extern int _find_first_bit_le(const unsigned long *p, unsigned size); -extern int _find_next_bit_le(const unsigned long *p, int size, int offset); - -/* - * Big endian assembly bitops. nr = 0 -> byte 3 bit 0. - */ -extern int _find_first_zero_bit_be(const void * p, unsigned size); -extern int _find_next_zero_bit_be(const void * p, int size, int offset); -extern int _find_first_bit_be(const unsigned long *p, unsigned size); -extern int _find_next_bit_be(const unsigned long *p, int size, int offset); - -#ifndef __ARMEB__ -/* - * These are the little endian, atomic definitions. - */ -#define find_first_zero_bit(p,sz) _find_first_zero_bit_le(p,sz) -#define find_next_zero_bit(p,sz,off) _find_next_zero_bit_le(p,sz,off) -#define find_first_bit(p,sz) _find_first_bit_le(p,sz) -#define find_next_bit(p,sz,off) _find_next_bit_le(p,sz,off) - -#else -/* - * These are the big endian, atomic definitions. 
- */ -#define find_first_zero_bit(p,sz) _find_first_zero_bit_be(p,sz) -#define find_next_zero_bit(p,sz,off) _find_next_zero_bit_be(p,sz,off) -#define find_first_bit(p,sz) _find_first_bit_be(p,sz) -#define find_next_bit(p,sz,off) _find_next_bit_be(p,sz,off) - -#endif - static inline int constant_fls(int x) { int r = 32; @@ -182,10 +140,11 @@ static inline int fls(int x) return constant_fls(x); asm("clz\t%0, %1" : "=r" (ret) : "r" (x)); - ret = 32 - ret; + ret = BITS_PER_LONG - ret; return ret; } + #define ffs(x) ({ unsigned long __t = (x); fls(__t & -__t); }) /** diff --git a/xen/include/asm-arm/config.h b/xen/include/asm-arm/config.h index e5dce5e..add70bd 100644 --- a/xen/include/asm-arm/config.h +++ b/xen/include/asm-arm/config.h @@ -7,6 +7,21 @@ #ifndef __ARM_CONFIG_H__ #define __ARM_CONFIG_H__ +#if defined(__aarch64__) +# define CONFIG_ARM_64 1 +#elif defined(__arm__) +# define CONFIG_ARM_32 1 +#endif + +#if defined(CONFIG_ARM_64) +# define LONG_BYTEORDER 3 +#else +# define LONG_BYTEORDER 2 +#endif + +#define BYTES_PER_LONG (1 << LONG_BYTEORDER) +#define BITS_PER_LONG (BYTES_PER_LONG << 3) + #define CONFIG_PAGING_ASSISTANCE 1 #define CONFIG_PAGING_LEVELS 3 diff --git a/xen/include/asm-arm/types.h b/xen/include/asm-arm/types.h index 48864f9..07f7898 100644 --- a/xen/include/asm-arm/types.h +++ b/xen/include/asm-arm/types.h @@ -15,8 +15,13 @@ typedef __signed__ int __s32; typedef unsigned int __u32; #if defined(__GNUC__) && !defined(__STRICT_ANSI__) +#if defined(CONFIG_ARM_32) typedef __signed__ long long __s64; typedef unsigned long long __u64; +#elif defined (CONFIG_ARM_64) +typedef __signed__ long __s64; +typedef unsigned long __u64; +#endif #endif typedef signed char s8; @@ -28,11 +33,19 @@ typedef unsigned short u16; typedef signed int s32; typedef unsigned int u32; +#if defined(CONFIG_ARM_32) typedef signed long long s64; typedef unsigned long long u64; typedef u64 paddr_t; #define INVALID_PADDR (~0ULL) #define PRIpaddr "016llx" +#elif defined 
(CONFIG_ARM_64) +typedef signed long s64; +typedef unsigned long u64; +typedef u64 paddr_t; +#define INVALID_PADDR (~0UL) +#define PRIpaddr "016lx" +#endif typedef unsigned long size_t; @@ -42,10 +55,6 @@ typedef char bool_t; #endif /* __ASSEMBLY__ */ -#define BITS_PER_LONG 32 -#define BYTES_PER_LONG 4 -#define LONG_BYTEORDER 2 - #endif /* __ARM_TYPES_H__ */ /* * Local variables: -- 1.7.2.5
Signed-off-by: Ian Campbell <ian.campbell@citrix.com> Acked-by: Tim Deegan <tim@xen.org> --- v2: no change, but these need to be revisited considering the interaction of sev/wfe etc. May need to rework the generic code in order to make best use of wfe (on 32-bit ARM too) --- xen/include/asm-arm/arm32/spinlock.h | 141 ++++++++++++++++++++++++++++++++++ xen/include/asm-arm/arm64/spinlock.h | 125 ++++++++++++++++++++++++++++++ xen/include/asm-arm/spinlock.h | 135 ++------------------------------ 3 files changed, 273 insertions(+), 128 deletions(-) create mode 100644 xen/include/asm-arm/arm32/spinlock.h create mode 100644 xen/include/asm-arm/arm64/spinlock.h diff --git a/xen/include/asm-arm/arm32/spinlock.h b/xen/include/asm-arm/arm32/spinlock.h new file mode 100644 index 0000000..a7bcdbf --- /dev/null +++ b/xen/include/asm-arm/arm32/spinlock.h @@ -0,0 +1,141 @@ +#ifndef __ASM_ARM32_SPINLOCK_H +#define __ASM_ARM32_SPINLOCK_H + +static inline void dsb_sev(void) +{ + __asm__ __volatile__ ( + "dsb\n" + "sev\n" + ); +} + +typedef struct { + volatile unsigned int lock; +} raw_spinlock_t; + +#define _RAW_SPIN_LOCK_UNLOCKED { 0 } + +#define _raw_spin_is_locked(x) ((x)->lock != 0) + +static always_inline void _raw_spin_unlock(raw_spinlock_t *lock) +{ + ASSERT(_raw_spin_is_locked(lock)); + + smp_mb(); + + __asm__ __volatile__( +" str %1, [%0]\n" + : + : "r" (&lock->lock), "r" (0) + : "cc"); + + dsb_sev(); +} + +static always_inline int _raw_spin_trylock(raw_spinlock_t *lock) +{ + unsigned long tmp; + + __asm__ __volatile__( +" ldrex %0, [%1]\n" +" teq %0, #0\n" +" strexeq %0, %2, [%1]" + : "=&r" (tmp) + : "r" (&lock->lock), "r" (1) + : "cc"); + + if (tmp == 0) { + smp_mb(); + return 1; + } else { + return 0; + } +} + +typedef struct { + volatile unsigned int lock; +} raw_rwlock_t; + +#define _RAW_RW_LOCK_UNLOCKED { 0 } + +static always_inline int _raw_read_trylock(raw_rwlock_t *rw) +{ + unsigned long tmp, tmp2 = 1; + + __asm__ __volatile__( +"1: ldrex %0, [%2]\n" +" adds %0, 
%0, #1\n" +" strexpl %1, %0, [%2]\n" + : "=&r" (tmp), "+r" (tmp2) + : "r" (&rw->lock) + : "cc"); + + smp_mb(); + return tmp2 == 0; +} + +static always_inline int _raw_write_trylock(raw_rwlock_t *rw) +{ + unsigned long tmp; + + __asm__ __volatile__( +"1: ldrex %0, [%1]\n" +" teq %0, #0\n" +" strexeq %0, %2, [%1]" + : "=&r" (tmp) + : "r" (&rw->lock), "r" (0x80000000) + : "cc"); + + if (tmp == 0) { + smp_mb(); + return 1; + } else { + return 0; + } +} + +static inline void _raw_read_unlock(raw_rwlock_t *rw) +{ + unsigned long tmp, tmp2; + + smp_mb(); + + __asm__ __volatile__( +"1: ldrex %0, [%2]\n" +" sub %0, %0, #1\n" +" strex %1, %0, [%2]\n" +" teq %1, #0\n" +" bne 1b" + : "=&r" (tmp), "=&r" (tmp2) + : "r" (&rw->lock) + : "cc"); + + if (tmp == 0) + dsb_sev(); +} + +static inline void _raw_write_unlock(raw_rwlock_t *rw) +{ + smp_mb(); + + __asm__ __volatile__( + "str %1, [%0]\n" + : + : "r" (&rw->lock), "r" (0) + : "cc"); + + dsb_sev(); +} + +#define _raw_rw_is_locked(x) ((x)->lock != 0) +#define _raw_rw_is_write_locked(x) ((x)->lock == 0x80000000) + +#endif /* __ASM_SPINLOCK_H */ +/* + * Local variables: + * mode: C + * c-set-style: "BSD" + * c-basic-offset: 4 + * indent-tabs-mode: nil + * End: + */ diff --git a/xen/include/asm-arm/arm64/spinlock.h b/xen/include/asm-arm/arm64/spinlock.h new file mode 100644 index 0000000..52ad688 --- /dev/null +++ b/xen/include/asm-arm/arm64/spinlock.h @@ -0,0 +1,125 @@ +/* + * Derived from Linux arch64 spinlock.h which is: + * Copyright (C) 2012 ARM Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see <http://www.gnu.org/licenses/>. + */ + +#ifndef __ASM_ARM64_SPINLOCK_H +#define __ASM_ARM64_SPINLOCK_H + +typedef struct { + volatile unsigned int lock; +} raw_spinlock_t; + +#define _RAW_SPIN_LOCK_UNLOCKED { 0 } + +#define _raw_spin_is_locked(x) ((x)->lock != 0) + +static always_inline void _raw_spin_unlock(raw_spinlock_t *lock) +{ + ASSERT(_raw_spin_is_locked(lock)); + + asm volatile( + " stlr %w1, [%0]\n" + : : "r" (&lock->lock), "r" (0) : "memory"); +} + +static always_inline int _raw_spin_trylock(raw_spinlock_t *lock) +{ + unsigned int tmp; + + asm volatile( + " ldaxr %w0, [%1]\n" + " cbnz %w0, 1f\n" + " stxr %w0, %w2, [%1]\n" + "1:\n" + : "=&r" (tmp) + : "r" (&lock->lock), "r" (1) + : "memory"); + + return !tmp; +} + +typedef struct { + volatile unsigned int lock; +} raw_rwlock_t; + +#define _RAW_RW_LOCK_UNLOCKED { 0 } + +static always_inline int _raw_read_trylock(raw_rwlock_t *rw) +{ + unsigned int tmp, tmp2 = 1; + + asm volatile( + " ldaxr %w0, [%2]\n" + " add %w0, %w0, #1\n" + " tbnz %w0, #31, 1f\n" + " stxr %w1, %w0, [%2]\n" + "1:\n" + : "=&r" (tmp), "+r" (tmp2) + : "r" (&rw->lock) + : "memory"); + + return !tmp2; +} + +static always_inline int _raw_write_trylock(raw_rwlock_t *rw) +{ + unsigned int tmp; + + asm volatile( + " ldaxr %w0, [%1]\n" + " cbnz %w0, 1f\n" + " stxr %w0, %w2, [%1]\n" + "1:\n" + : "=&r" (tmp) + : "r" (&rw->lock), "r" (0x80000000) + : "memory"); + + return !tmp; +} + +static inline void _raw_read_unlock(raw_rwlock_t *rw) +{ + unsigned int tmp, tmp2; + + asm volatile( + "1: ldxr %w0, [%2]\n" + " sub %w0, %w0, #1\n" + " stlxr %w1, %w0, [%2]\n" + " cbnz %w1, 1b\n" + : "=&r" (tmp), "=&r" (tmp2) + : "r" (&rw->lock) + : "memory"); +} + +static inline void _raw_write_unlock(raw_rwlock_t *rw) +{ + asm volatile( + " stlr %w1, [%0]\n" + : : "r" (&rw->lock), "r" (0) : "memory"); +} + +#define _raw_rw_is_locked(x) ((x)->lock != 0) 
+#define _raw_rw_is_write_locked(x) ((x)->lock == 0x80000000) + +#endif /* __ASM_SPINLOCK_H */ +/* + * Local variables: + * mode: C + * c-set-style: "BSD" + * c-basic-offset: 4 + * indent-tabs-mode: nil + * End: + */ diff --git a/xen/include/asm-arm/spinlock.h b/xen/include/asm-arm/spinlock.h index b1825c9..d753210 100644 --- a/xen/include/asm-arm/spinlock.h +++ b/xen/include/asm-arm/spinlock.h @@ -4,134 +4,13 @@ #include <xen/config.h> #include <xen/lib.h> -static inline void dsb_sev(void) -{ - __asm__ __volatile__ ( - "dsb\n" - "sev\n" - ); -} - -typedef struct { - volatile unsigned int lock; -} raw_spinlock_t; - -#define _RAW_SPIN_LOCK_UNLOCKED { 0 } - -#define _raw_spin_is_locked(x) ((x)->lock != 0) - -static always_inline void _raw_spin_unlock(raw_spinlock_t *lock) -{ - ASSERT(_raw_spin_is_locked(lock)); - - smp_mb(); - - __asm__ __volatile__( -" str %1, [%0]\n" - : - : "r" (&lock->lock), "r" (0) - : "cc"); - - dsb_sev(); -} - -static always_inline int _raw_spin_trylock(raw_spinlock_t *lock) -{ - unsigned long tmp; - - __asm__ __volatile__( -" ldrex %0, [%1]\n" -" teq %0, #0\n" -" strexeq %0, %2, [%1]" - : "=&r" (tmp) - : "r" (&lock->lock), "r" (1) - : "cc"); - - if (tmp == 0) { - smp_mb(); - return 1; - } else { - return 0; - } -} - -typedef struct { - volatile unsigned int lock; -} raw_rwlock_t; - -#define _RAW_RW_LOCK_UNLOCKED { 0 } - -static always_inline int _raw_read_trylock(raw_rwlock_t *rw) -{ - unsigned long tmp, tmp2 = 1; - - __asm__ __volatile__( -"1: ldrex %0, [%2]\n" -" adds %0, %0, #1\n" -" strexpl %1, %0, [%2]\n" - : "=&r" (tmp), "+r" (tmp2) - : "r" (&rw->lock) - : "cc"); - - smp_mb(); - return tmp2 == 0; -} - -static always_inline int _raw_write_trylock(raw_rwlock_t *rw) -{ - unsigned long tmp; - - __asm__ __volatile__( -"1: ldrex %0, [%1]\n" -" teq %0, #0\n" -" strexeq %0, %2, [%1]" - : "=&r" (tmp) - : "r" (&rw->lock), "r" (0x80000000) - : "cc"); - - if (tmp == 0) { - smp_mb(); - return 1; - } else { - return 0; - } -} - -static inline void 
_raw_read_unlock(raw_rwlock_t *rw) -{ - unsigned long tmp, tmp2; - - smp_mb(); - - __asm__ __volatile__( -"1: ldrex %0, [%2]\n" -" sub %0, %0, #1\n" -" strex %1, %0, [%2]\n" -" teq %1, #0\n" -" bne 1b" - : "=&r" (tmp), "=&r" (tmp2) - : "r" (&rw->lock) - : "cc"); - - if (tmp == 0) - dsb_sev(); -} - -static inline void _raw_write_unlock(raw_rwlock_t *rw) -{ - smp_mb(); - - __asm__ __volatile__( - "str %1, [%0]\n" - : - : "r" (&rw->lock), "r" (0) - : "cc"); - - dsb_sev(); -} - -#define _raw_rw_is_locked(x) ((x)->lock != 0) -#define _raw_rw_is_write_locked(x) ((x)->lock == 0x80000000) +#if defined(CONFIG_ARM_32) +# include <asm/arm32/spinlock.h> +#elif defined(CONFIG_ARM_64) +# include <asm/arm64/spinlock.h> +#else +# error "unknown ARM variant" +#endif #endif /* __ASM_SPINLOCK_H */ /* -- 1.7.2.5
Signed-off-by: Ian Campbell <ian.campbell@citrix.com> --- v2: Remove unused, #if-0''d, 64-bit atomics. --- xen/include/asm-arm/arm32/atomic.h | 151 +++++++++++++++++++++++++++++ xen/include/asm-arm/arm64/atomic.h | 163 +++++++++++++++++++++++++++++++ xen/include/asm-arm/atomic.h | 186 +++++++----------------------------- 3 files changed, 347 insertions(+), 153 deletions(-) create mode 100644 xen/include/asm-arm/arm32/atomic.h create mode 100644 xen/include/asm-arm/arm64/atomic.h diff --git a/xen/include/asm-arm/arm32/atomic.h b/xen/include/asm-arm/arm32/atomic.h new file mode 100644 index 0000000..4ee6626 --- /dev/null +++ b/xen/include/asm-arm/arm32/atomic.h @@ -0,0 +1,151 @@ +/* + * arch/arm/include/asm/atomic.h + * + * Copyright (C) 1996 Russell King. + * Copyright (C) 2002 Deep Blue Solutions Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ +#ifndef __ARCH_ARM_ARM32_ATOMIC__ +#define __ARCH_ARM_ARM32_ATOMIC__ + +/* + * ARMv6 UP and SMP safe atomic ops. We use load exclusive and + * store exclusive to ensure that these are atomic. We may loop + * to ensure that the update happens. 
+ */ +static inline void atomic_add(int i, atomic_t *v) +{ + unsigned long tmp; + int result; + + __asm__ __volatile__("@ atomic_add\n" +"1: ldrex %0, [%3]\n" +" add %0, %0, %4\n" +" strex %1, %0, [%3]\n" +" teq %1, #0\n" +" bne 1b" + : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter) + : "r" (&v->counter), "Ir" (i) + : "cc"); +} + +static inline int atomic_add_return(int i, atomic_t *v) +{ + unsigned long tmp; + int result; + + smp_mb(); + + __asm__ __volatile__("@ atomic_add_return\n" +"1: ldrex %0, [%3]\n" +" add %0, %0, %4\n" +" strex %1, %0, [%3]\n" +" teq %1, #0\n" +" bne 1b" + : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter) + : "r" (&v->counter), "Ir" (i) + : "cc"); + + smp_mb(); + + return result; +} + +static inline void atomic_sub(int i, atomic_t *v) +{ + unsigned long tmp; + int result; + + __asm__ __volatile__("@ atomic_sub\n" +"1: ldrex %0, [%3]\n" +" sub %0, %0, %4\n" +" strex %1, %0, [%3]\n" +" teq %1, #0\n" +" bne 1b" + : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter) + : "r" (&v->counter), "Ir" (i) + : "cc"); +} + +static inline int atomic_sub_return(int i, atomic_t *v) +{ + unsigned long tmp; + int result; + + smp_mb(); + + __asm__ __volatile__("@ atomic_sub_return\n" +"1: ldrex %0, [%3]\n" +" sub %0, %0, %4\n" +" strex %1, %0, [%3]\n" +" teq %1, #0\n" +" bne 1b" + : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter) + : "r" (&v->counter), "Ir" (i) + : "cc"); + + smp_mb(); + + return result; +} + +static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new) +{ + unsigned long oldval, res; + + smp_mb(); + + do { + __asm__ __volatile__("@ atomic_cmpxchg\n" + "ldrex %1, [%3]\n" + "mov %0, #0\n" + "teq %1, %4\n" + "strexeq %0, %5, [%3]\n" + : "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter) + : "r" (&ptr->counter), "Ir" (old), "r" (new) + : "cc"); + } while (res); + + smp_mb(); + + return oldval; +} + +static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr) +{ + unsigned long tmp, tmp2; + + __asm__ __volatile__("@ 
atomic_clear_mask\n" +"1: ldrex %0, [%3]\n" +" bic %0, %0, %4\n" +" strex %1, %0, [%3]\n" +" teq %1, #0\n" +" bne 1b" + : "=&r" (tmp), "=&r" (tmp2), "+Qo" (*addr) + : "r" (addr), "Ir" (mask) + : "cc"); +} + +#define atomic_inc(v) atomic_add(1, v) +#define atomic_dec(v) atomic_sub(1, v) + +#define atomic_inc_and_test(v) (atomic_add_return(1, v) == 0) +#define atomic_dec_and_test(v) (atomic_sub_return(1, v) == 0) +#define atomic_inc_return(v) (atomic_add_return(1, v)) +#define atomic_dec_return(v) (atomic_sub_return(1, v)) +#define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0) + +#define atomic_add_negative(i,v) (atomic_add_return(i, v) < 0) + +#endif /* __ARCH_ARM_ARM32_ATOMIC__ */ +/* + * Local variables: + * mode: C + * c-set-style: "BSD" + * c-basic-offset: 4 + * indent-tabs-mode: nil + * End: + */ diff --git a/xen/include/asm-arm/arm64/atomic.h b/xen/include/asm-arm/arm64/atomic.h new file mode 100644 index 0000000..972d50c --- /dev/null +++ b/xen/include/asm-arm/arm64/atomic.h @@ -0,0 +1,163 @@ +/* + * Based on arch/arm64/include/asm/atomic.h + * which in turn is + * Based on arch/arm/include/asm/atomic.h + * + * Copyright (C) 1996 Russell King. + * Copyright (C) 2002 Deep Blue Solutions Ltd. + * Copyright (C) 2012 ARM Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see <http://www.gnu.org/licenses/>. + */ +#ifndef __ARCH_ARM_ARM64_ATOMIC +#define __ARCH_ARM_ARM64_ATOMIC + +/* + * AArch64 UP and SMP safe atomic ops. 
We use load exclusive and + * store exclusive to ensure that these are atomic. We may loop + * to ensure that the update happens. + */ +static inline void atomic_add(int i, atomic_t *v) +{ + unsigned long tmp; + int result; + + asm volatile("// atomic_add\n" +"1: ldxr %w0, [%3]\n" +" add %w0, %w0, %w4\n" +" stxr %w1, %w0, [%3]\n" +" cbnz %w1, 1b" + : "=&r" (result), "=&r" (tmp), "+o" (v->counter) + : "r" (&v->counter), "Ir" (i) + : "cc"); +} + +static inline int atomic_add_return(int i, atomic_t *v) +{ + unsigned long tmp; + int result; + + asm volatile("// atomic_add_return\n" +"1: ldaxr %w0, [%3]\n" +" add %w0, %w0, %w4\n" +" stlxr %w1, %w0, [%3]\n" +" cbnz %w1, 1b" + : "=&r" (result), "=&r" (tmp), "+o" (v->counter) + : "r" (&v->counter), "Ir" (i) + : "cc"); + + return result; +} + +static inline void atomic_sub(int i, atomic_t *v) +{ + unsigned long tmp; + int result; + + asm volatile("// atomic_sub\n" +"1: ldxr %w0, [%3]\n" +" sub %w0, %w0, %w4\n" +" stxr %w1, %w0, [%3]\n" +" cbnz %w1, 1b" + : "=&r" (result), "=&r" (tmp), "+o" (v->counter) + : "r" (&v->counter), "Ir" (i) + : "cc"); +} + +static inline int atomic_sub_return(int i, atomic_t *v) +{ + unsigned long tmp; + int result; + + asm volatile("// atomic_sub_return\n" +"1: ldaxr %w0, [%3]\n" +" sub %w0, %w0, %w4\n" +" stlxr %w1, %w0, [%3]\n" +" cbnz %w1, 1b" + : "=&r" (result), "=&r" (tmp), "+o" (v->counter) + : "r" (&v->counter), "Ir" (i) + : "cc"); + + return result; +} + +static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new) +{ + unsigned long tmp; + int oldval; + + asm volatile("// atomic_cmpxchg\n" +"1: ldaxr %w1, [%3]\n" +" cmp %w1, %w4\n" +" b.ne 2f\n" +" stlxr %w0, %w5, [%3]\n" +" cbnz %w0, 1b\n" +"2:" + : "=&r" (tmp), "=&r" (oldval), "+o" (ptr->counter) + : "r" (&ptr->counter), "Ir" (old), "r" (new) + : "cc"); + + return oldval; +} + +static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr) +{ + unsigned long tmp, tmp2; + + asm volatile("// atomic_clear_mask\n" +"1: 
ldxr %0, [%3]\n" +" bic %0, %0, %4\n" +" stxr %w1, %0, [%3]\n" +" cbnz %w1, 1b" + : "=&r" (tmp), "=&r" (tmp2), "+o" (*addr) + : "r" (addr), "Ir" (mask) + : "cc"); +} + +#define atomic_xchg(v, new) (xchg(&((v)->counter), new)) + +static inline int __atomic_add_unless(atomic_t *v, int a, int u) +{ + int c, old; + + c = atomic_read(v); + while (c != u && (old = atomic_cmpxchg((v), c, c + a)) != c) + c = old; + return c; +} + +#define atomic_inc(v) atomic_add(1, v) +#define atomic_dec(v) atomic_sub(1, v) + +#define atomic_inc_and_test(v) (atomic_add_return(1, v) == 0) +#define atomic_dec_and_test(v) (atomic_sub_return(1, v) == 0) +#define atomic_inc_return(v) (atomic_add_return(1, v)) +#define atomic_dec_return(v) (atomic_sub_return(1, v)) +#define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0) + +#define atomic_add_negative(i,v) (atomic_add_return(i, v) < 0) + +#define smp_mb__before_atomic_dec() smp_mb() +#define smp_mb__after_atomic_dec() smp_mb() +#define smp_mb__before_atomic_inc() smp_mb() +#define smp_mb__after_atomic_inc() smp_mb() + +#endif +/* + * Local variables: + * mode: C + * c-set-style: "BSD" + * c-basic-offset: 4 + * indent-tabs-mode: nil + * End: + */ diff --git a/xen/include/asm-arm/atomic.h b/xen/include/asm-arm/atomic.h index c7eadd6..b37b2d0 100644 --- a/xen/include/asm-arm/atomic.h +++ b/xen/include/asm-arm/atomic.h @@ -1,48 +1,49 @@ -/* - * arch/arm/include/asm/atomic.h - * - * Copyright (C) 1996 Russell King. - * Copyright (C) 2002 Deep Blue Solutions Ltd. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. 
- */ #ifndef __ARCH_ARM_ATOMIC__ #define __ARCH_ARM_ATOMIC__ #include <xen/config.h> #include <asm/system.h> -#define build_atomic_read(name, size, type, reg) \ +#define build_atomic_read(name, size, width, type, reg)\ static inline type name(const volatile type *addr) \ { \ type ret; \ - asm volatile("ldr" size " %0,%1" \ + asm volatile("ldr" size " %" width "0,%1" \ : reg (ret) \ : "m" (*(volatile type *)addr)); \ return ret; \ } -#define build_atomic_write(name, size, type, reg) \ +#define build_atomic_write(name, size, width, type, reg) \ static inline void name(volatile type *addr, type val) \ { \ - asm volatile("str" size " %1,%0" \ + asm volatile("str" size " %"width"1,%0" \ : "=m" (*(volatile type *)addr) \ : reg (val)); \ } -build_atomic_read(read_u8_atomic, "b", uint8_t, "=q") -build_atomic_read(read_u16_atomic, "h", uint16_t, "=r") -build_atomic_read(read_u32_atomic, "", uint32_t, "=r") -//build_atomic_read(read_u64_atomic, "d", uint64_t, "=r") -build_atomic_read(read_int_atomic, "", int, "=r") - -build_atomic_write(write_u8_atomic, "b", uint8_t, "q") -build_atomic_write(write_u16_atomic, "h", uint16_t, "r") -build_atomic_write(write_u32_atomic, "", uint32_t, "r") -//build_atomic_write(write_u64_atomic, "d", uint64_t, "r") -build_atomic_write(write_int_atomic, "", int, "r") +#if defined (CONFIG_ARM_32) +#define BYTE "" +#define WORD "" +#elif defined (CONFIG_ARM_64) +#define BYTE "w" +#define WORD "w" +#endif + +build_atomic_read(read_u8_atomic, "b", BYTE, uint8_t, "=r") +build_atomic_read(read_u16_atomic, "h", WORD, uint16_t, "=r") +build_atomic_read(read_u32_atomic, "", WORD, uint32_t, "=r") +build_atomic_read(read_int_atomic, "", WORD, int, "=r") + +build_atomic_write(write_u8_atomic, "b", BYTE, uint8_t, "r") +build_atomic_write(write_u16_atomic, "h", WORD, uint16_t, "r") +build_atomic_write(write_u32_atomic, "", WORD, uint32_t, "r") +build_atomic_write(write_int_atomic, "", WORD, int, "r") + +#if 0 /* defined (CONFIG_ARM_64) */ 
+build_atomic_read(read_u64_atomic, "x", uint64_t, "=r") +build_atomic_write(write_u64_atomic, "x", uint64_t, "r") +#endif void __bad_atomic_size(void); @@ -88,134 +89,13 @@ typedef struct { int counter; } atomic_t; #define _atomic_set(v,i) (((v).counter) = (i)) #define atomic_set(v,i) (((v)->counter) = (i)) -/* - * ARMv6 UP and SMP safe atomic ops. We use load exclusive and - * store exclusive to ensure that these are atomic. We may loop - * to ensure that the update happens. - */ -static inline void atomic_add(int i, atomic_t *v) -{ - unsigned long tmp; - int result; - - __asm__ __volatile__("@ atomic_add\n" -"1: ldrex %0, [%3]\n" -" add %0, %0, %4\n" -" strex %1, %0, [%3]\n" -" teq %1, #0\n" -" bne 1b" - : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter) - : "r" (&v->counter), "Ir" (i) - : "cc"); -} - -static inline int atomic_add_return(int i, atomic_t *v) -{ - unsigned long tmp; - int result; - - smp_mb(); - - __asm__ __volatile__("@ atomic_add_return\n" -"1: ldrex %0, [%3]\n" -" add %0, %0, %4\n" -" strex %1, %0, [%3]\n" -" teq %1, #0\n" -" bne 1b" - : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter) - : "r" (&v->counter), "Ir" (i) - : "cc"); - - smp_mb(); - - return result; -} - -static inline void atomic_sub(int i, atomic_t *v) -{ - unsigned long tmp; - int result; - - __asm__ __volatile__("@ atomic_sub\n" -"1: ldrex %0, [%3]\n" -" sub %0, %0, %4\n" -" strex %1, %0, [%3]\n" -" teq %1, #0\n" -" bne 1b" - : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter) - : "r" (&v->counter), "Ir" (i) - : "cc"); -} - -static inline int atomic_sub_return(int i, atomic_t *v) -{ - unsigned long tmp; - int result; - - smp_mb(); - - __asm__ __volatile__("@ atomic_sub_return\n" -"1: ldrex %0, [%3]\n" -" sub %0, %0, %4\n" -" strex %1, %0, [%3]\n" -" teq %1, #0\n" -" bne 1b" - : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter) - : "r" (&v->counter), "Ir" (i) - : "cc"); - - smp_mb(); - - return result; -} - -static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new) -{ - unsigned 
long oldval, res; - - smp_mb(); - - do { - __asm__ __volatile__("@ atomic_cmpxchg\n" - "ldrex %1, [%3]\n" - "mov %0, #0\n" - "teq %1, %4\n" - "strexeq %0, %5, [%3]\n" - : "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter) - : "r" (&ptr->counter), "Ir" (old), "r" (new) - : "cc"); - } while (res); - - smp_mb(); - - return oldval; -} - -static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr) -{ - unsigned long tmp, tmp2; - - __asm__ __volatile__("@ atomic_clear_mask\n" -"1: ldrex %0, [%3]\n" -" bic %0, %0, %4\n" -" strex %1, %0, [%3]\n" -" teq %1, #0\n" -" bne 1b" - : "=&r" (tmp), "=&r" (tmp2), "+Qo" (*addr) - : "r" (addr), "Ir" (mask) - : "cc"); -} - -#define atomic_inc(v) atomic_add(1, v) -#define atomic_dec(v) atomic_sub(1, v) - -#define atomic_inc_and_test(v) (atomic_add_return(1, v) == 0) -#define atomic_dec_and_test(v) (atomic_sub_return(1, v) == 0) -#define atomic_inc_return(v) (atomic_add_return(1, v)) -#define atomic_dec_return(v) (atomic_sub_return(1, v)) -#define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0) - -#define atomic_add_negative(i,v) (atomic_add_return(i, v) < 0) +#if defined(CONFIG_ARM_32) +# include <asm/arm32/atomic.h> +#elif defined(CONFIG_ARM_64) +# include <asm/arm64/atomic.h> +#else +# error "unknown ARM variant" +#endif static inline atomic_t atomic_compareandswap( atomic_t old, atomic_t new, atomic_t *v) -- 1.7.2.5
Ian Campbell
2013-Feb-14 16:47 UTC
[PATCH V2 09/46] xen: arm: refactor co-pro and sysreg reg handling.
AArch64 has removed the concept of co-processors replacing them with a combination of specific instructions (cache and tlb flushes etc) and system registers (which are understood by name in the assembler). However most system registers are equivalent to a particular AArch32 co-pro register and can be used by generic code in the same way. Note that the names of the registers differ (often only slightly). For consistency it would be better to use only one set of names in the common code. Therefore move the {READ,WRITE}_CP{32,64} accessors into arm32/processor.h and provide {READ,WRITE}_SYSREG. Where the names differ #defines will be provided on 32-bit. HSR_CPREG and friends are required even on 64-bit in order to decode traps from 32 bit guests. Signed-off-by: Ian Campbell <ian.campbell@citrix.com> Acked-by: Tim Deegan <tim@xen.org> --- xen/include/asm-arm/arm32/processor.h | 68 +++++++++++++++++++++++++++++++++ xen/include/asm-arm/arm64/processor.h | 37 ++++++++++++++++++ xen/include/asm-arm/cpregs.h | 40 +++---------------- xen/include/asm-arm/processor.h | 9 +++- 4 files changed, 118 insertions(+), 36 deletions(-) create mode 100644 xen/include/asm-arm/arm32/processor.h create mode 100644 xen/include/asm-arm/arm64/processor.h diff --git a/xen/include/asm-arm/arm32/processor.h b/xen/include/asm-arm/arm32/processor.h new file mode 100644 index 0000000..843fbd2 --- /dev/null +++ b/xen/include/asm-arm/arm32/processor.h @@ -0,0 +1,68 @@ +#ifndef __ASM_ARM_ARM32_PROCESSOR_H +#define __ASM_ARM_ARM32_PROCESSOR_H + +/* Layout as used in assembly, with src/dest registers mixed in */ +#define __CP32(r, coproc, opc1, crn, crm, opc2) coproc, opc1, r, crn, crm, opc2 +#define __CP64(r1, r2, coproc, opc, crm) coproc, opc, r1, r2, crm +#define CP32(r, name...) __CP32(r, name) +#define CP64(r, name...) __CP64(r, name) + +/* Stringified for inline assembly */ +#define LOAD_CP32(r, name...) "mrc " __stringify(CP32(%r, name)) ";" +#define STORE_CP32(r, name...) 
"mcr " __stringify(CP32(%r, name)) ";" +#define LOAD_CP64(r, name...) "mrrc " __stringify(CP64(%r, %H##r, name)) ";" +#define STORE_CP64(r, name...) "mcrr " __stringify(CP64(%r, %H##r, name)) ";" + +#ifndef __ASSEMBLY__ + +/* C wrappers */ +#define READ_CP32(name...) ({ \ + register uint32_t _r; \ + asm volatile(LOAD_CP32(0, name) : "=r" (_r)); \ + _r; }) + +#define WRITE_CP32(v, name...) do { \ + register uint32_t _r = (v); \ + asm volatile(STORE_CP32(0, name) : : "r" (_r)); \ +} while (0) + +#define READ_CP64(name...) ({ \ + register uint64_t _r; \ + asm volatile(LOAD_CP64(0, name) : "=r" (_r)); \ + _r; }) + +#define WRITE_CP64(v, name...) do { \ + register uint64_t _r = (v); \ + asm volatile(STORE_CP64(0, name) : : "r" (_r)); \ +} while (0) + +/* + * C wrappers for accessing system registers. + * + * Registers come in 3 types: + * - those which are always 32-bit regardless of AArch32 vs AArch64 + * (use {READ,WRITE}_SYSREG32). + * - those which are always 64-bit regardless of AArch32 vs AArch64 + * (use {READ,WRITE}_SYSREG64). + * - those which vary between AArch32 and AArch64 (use {READ,WRITE}_SYSREG). + */ +#define READ_SYSREG32(R...) READ_CP32(R) +#define WRITE_SYSREG32(V, R...) WRITE_CP32(V, R) + +#define READ_SYSREG64(R...) READ_CP64(R) +#define WRITE_SYSREG64(V, R...) WRITE_CP64(V, R) + +#define READ_SYSREG(R...) READ_SYSREG32(R) +#define WRITE_SYSREG(V, R...) 
WRITE_SYSREG32(V, R) + +#endif /* __ASSEMBLY__ */ + +#endif /* __ASM_ARM_ARM32_PROCESSOR_H */ +/* + * Local variables: + * mode: C + * c-set-style: "BSD" + * c-basic-offset: 4 + * indent-tabs-mode: nil + * End: + */ diff --git a/xen/include/asm-arm/arm64/processor.h b/xen/include/asm-arm/arm64/processor.h new file mode 100644 index 0000000..fdb0dab --- /dev/null +++ b/xen/include/asm-arm/arm64/processor.h @@ -0,0 +1,37 @@ +#ifndef __ASM_ARM_ARM64_PROCESSOR_H +#define __ASM_ARM_ARM64_PROCESSOR_H + +#ifndef __ASSEMBLY__ + +#define READ_SYSREG32(name) ({ \ + uint32_t _r; \ + asm volatile("mrs %0, "#name : "=r" (_r)); \ + _r; }) +#define WRITE_SYSREG32(v, name) do { \ + uint32_t _r = v; \ + asm volatile("msr "#name", %0" : : "r" (_r)); \ +} while (0) + +#define WRITE_SYSREG64(v, name) do { \ + uint64_t _r = v; \ + asm volatile("msr "#name", %0" : : "r" (_r)); \ +} while (0) +#define READ_SYSREG64(name) ({ \ + uint64_t _r; \ + asm volatile("mrs %0, "#name : "=r" (_r)); \ + _r; }) + +#define READ_SYSREG(name) READ_SYSREG64(name) +#define WRITE_SYSREG(v, name) WRITE_SYSREG64(v, name) + +#endif /* __ASSEMBLY__ */ + +#endif /* __ASM_ARM_ARM64_PROCESSOR_H */ +/* + * Local variables: + * mode: C + * c-set-style: "BSD" + * c-basic-offset: 4 + * indent-tabs-mode: nil + * End: + */ diff --git a/xen/include/asm-arm/cpregs.h b/xen/include/asm-arm/cpregs.h index 3b51845..7eaa50f 100644 --- a/xen/include/asm-arm/cpregs.h +++ b/xen/include/asm-arm/cpregs.h @@ -3,40 +3,12 @@ #include <xen/stringify.h> -/* Co-processor registers */ - -/* Layout as used in assembly, with src/dest registers mixed in */ -#define __CP32(r, coproc, opc1, crn, crm, opc2) coproc, opc1, r, crn, crm, opc2 -#define __CP64(r1, r2, coproc, opc, crm) coproc, opc, r1, r2, crm -#define CP32(r, name...) __CP32(r, name) -#define CP64(r, name...) __CP64(r, name) - -/* Stringified for inline assembly */ -#define LOAD_CP32(r, name...) "mrc " __stringify(CP32(%r, name)) ";" -#define STORE_CP32(r, name...) 
"mcr " __stringify(CP32(%r, name)) ";" -#define LOAD_CP64(r, name...) "mrrc " __stringify(CP64(%r, %H##r, name)) ";" -#define STORE_CP64(r, name...) "mcrr " __stringify(CP64(%r, %H##r, name)) ";" - -/* C wrappers */ -#define READ_CP32(name...) ({ \ - register uint32_t _r; \ - asm volatile(LOAD_CP32(0, name) : "=r" (_r)); \ - _r; }) - -#define WRITE_CP32(v, name...) do { \ - register uint32_t _r = (v); \ - asm volatile(STORE_CP32(0, name) : : "r" (_r)); \ -} while (0) - -#define READ_CP64(name...) ({ \ - register uint64_t _r; \ - asm volatile(LOAD_CP64(0, name) : "=r" (_r)); \ - _r; }) - -#define WRITE_CP64(v, name...) do { \ - register uint64_t _r = (v); \ - asm volatile(STORE_CP64(0, name) : : "r" (_r)); \ -} while (0) +/* + * AArch32 Co-processor registers. + * + * Note that AArch64 requires many of these definitions in order to + * support 32-bit guests. + */ #define __HSR_CPREG_c0 0 #define __HSR_CPREG_c1 1 diff --git a/xen/include/asm-arm/processor.h b/xen/include/asm-arm/processor.h index 0c94f6b..0768cd4 100644 --- a/xen/include/asm-arm/processor.h +++ b/xen/include/asm-arm/processor.h @@ -225,8 +225,13 @@ union hsr { #define ID_PFR1_GT_MASK 0x000F0000 /* Generic Timer interface support */ #define ID_PFR1_GT_v1 0x00010000 -#define MSR(reg,val) asm volatile ("msr "#reg", %0\n" : : "r" (val)) -#define MRS(val,reg) asm volatile ("mrs %0,"#reg"\n" : "=r" (v)) +#if defined(CONFIG_ARM_32) +# include <asm/arm32/processor.h> +#elif defined(CONFIG_ARM_64) +# include <asm/arm64/processor.h> +#else +# error "unknown ARM variant" +#endif #ifndef __ASSEMBLY__ extern uint32_t hyp_traps_vector[8]; -- 1.7.2.5
Signed-off-by: Ian Campbell <ian.campbell@citrix.com> --- v2: remove comment wondering if they should be inner-shareable flushes, they shouldn''t for now. combine with other patch titled "TLB flushes." which followed in a couple of patches time. remove flush_guest_tlb(), nothing was calling it remove stray reference to flush branch predictor, which isn''t necessary on 64-bit. --- xen/include/asm-arm/arm32/flushtlb.h | 34 +++++++++++++++++ xen/include/asm-arm/arm32/page.h | 69 ++++++++++++++++++++++++++++++++++ xen/include/asm-arm/arm64/flushtlb.h | 34 +++++++++++++++++ xen/include/asm-arm/arm64/page.h | 67 +++++++++++++++++++++++++++++++++ xen/include/asm-arm/flushtlb.h | 34 +++++------------ xen/include/asm-arm/page.h | 67 ++++----------------------------- 6 files changed, 222 insertions(+), 83 deletions(-) create mode 100644 xen/include/asm-arm/arm32/flushtlb.h create mode 100644 xen/include/asm-arm/arm32/page.h create mode 100644 xen/include/asm-arm/arm64/flushtlb.h create mode 100644 xen/include/asm-arm/arm64/page.h diff --git a/xen/include/asm-arm/arm32/flushtlb.h b/xen/include/asm-arm/arm32/flushtlb.h new file mode 100644 index 0000000..3c2d5b6 --- /dev/null +++ b/xen/include/asm-arm/arm32/flushtlb.h @@ -0,0 +1,34 @@ +#ifndef __ASM_ARM_ARM32_FLUSHTLB_H__ +#define __ASM_ARM_ARM32_FLUSHTLB_H__ + +/* Flush local TLBs, current VMID only */ +static inline void flush_tlb_local(void) +{ + dsb(); + + WRITE_CP32((uint32_t) 0, TLBIALLIS); + + dsb(); + isb(); +} + +/* Flush local TLBs, all VMIDs, non-hypervisor mode */ +static inline void flush_tlb_all_local(void) +{ + dsb(); + + WRITE_CP32((uint32_t) 0, TLBIALLNSNHIS); + + dsb(); + isb(); +} + +#endif /* __ASM_ARM_ARM32_FLUSHTLB_H__ */ +/* + * Local variables: + * mode: C + * c-set-style: "BSD" + * c-basic-offset: 4 + * indent-tabs-mode: nil + * End: + */ diff --git a/xen/include/asm-arm/arm32/page.h b/xen/include/asm-arm/arm32/page.h new file mode 100644 index 0000000..073b8d1 --- /dev/null +++ 
b/xen/include/asm-arm/arm32/page.h @@ -0,0 +1,69 @@ +#ifndef __ARM_ARM32_PAGE_H__ +#define __ARM_ARM32_PAGE_H__ + +#ifndef __ASSEMBLY__ + +/* + * Flush all hypervisor mappings from the TLB and branch predictor. + * This is needed after changing Xen code mappings. + * + * The caller needs to issue the necessary DSB and D-cache flushes + * before calling flush_xen_text_tlb. + */ +static inline void flush_xen_text_tlb(void) +{ + register unsigned long r0 asm ("r0"); + asm volatile ( + "isb;" /* Ensure synchronization with previous changes to text */ + STORE_CP32(0, TLBIALLH) /* Flush hypervisor TLB */ + STORE_CP32(0, ICIALLU) /* Flush I-cache */ + STORE_CP32(0, BPIALL) /* Flush branch predictor */ + "dsb;" /* Ensure completion of TLB+BP flush */ + "isb;" + : : "r" (r0) /*dummy*/ : "memory"); +} + +/* + * Flush all hypervisor mappings from the data TLB. This is not + * sufficient when changing code mappings or for self modifying code. + */ +static inline void flush_xen_data_tlb(void) +{ + register unsigned long r0 asm ("r0"); + asm volatile("dsb;" /* Ensure preceding are visible */ + STORE_CP32(0, TLBIALLH) + "dsb;" /* Ensure completion of the TLB flush */ + "isb;" + : : "r" (r0) /* dummy */: "memory"); +} + +/* + * Flush a range of VA''s hypervisor mappings from the data TLB. This is not + * sufficient when changing code mappings or for self modifying code. 
+ */ +static inline void flush_xen_data_tlb_range_va(unsigned long va, unsigned long size) +{ + unsigned long end = va + size; + dsb(); /* Ensure preceding are visible */ + while ( va < end ) { + asm volatile(STORE_CP32(0, TLBIMVAH) + : : "r" (va) : "memory"); + va += PAGE_SIZE; + } + dsb(); /* Ensure completion of the TLB flush */ + isb(); +} + +#endif /* __ASSEMBLY__ */ + +#endif /* __ARM_ARM32_PAGE_H__ */ + +/* + * Local variables: + * mode: C + * c-set-style: "BSD" + * c-basic-offset: 4 + * tab-width: 4 + * indent-tabs-mode: nil + * End: + */ diff --git a/xen/include/asm-arm/arm64/flushtlb.h b/xen/include/asm-arm/arm64/flushtlb.h new file mode 100644 index 0000000..ca74fe3 --- /dev/null +++ b/xen/include/asm-arm/arm64/flushtlb.h @@ -0,0 +1,34 @@ +#ifndef __ASM_ARM_ARM64_FLUSHTLB_H__ +#define __ASM_ARM_ARM64_FLUSHTLB_H__ + +/* Flush local TLBs, current VMID only */ +static inline void flush_tlb_local(void) +{ + asm volatile( + "dsb sy;" + "tlbi vmalle1;" + "dsb sy;" + "isb;" + : : : "memory"); +} + +/* Flush local TLBs, all VMIDs, non-hypervisor mode */ +static inline void flush_tlb_all_local(void) +{ + asm volatile( + "dsb sy;" + "tlbi alle1;" + "dsb sy;" + "isb;" + : : : "memory"); +} + +#endif /* __ASM_ARM_ARM64_FLUSHTLB_H__ */ +/* + * Local variables: + * mode: C + * c-set-style: "BSD" + * c-basic-offset: 4 + * indent-tabs-mode: nil + * End: + */ diff --git a/xen/include/asm-arm/arm64/page.h b/xen/include/asm-arm/arm64/page.h new file mode 100644 index 0000000..636fb63 --- /dev/null +++ b/xen/include/asm-arm/arm64/page.h @@ -0,0 +1,67 @@ +#ifndef __ARM_ARM64_PAGE_H__ +#define __ARM_ARM64_PAGE_H__ + +#ifndef __ASSEMBLY__ + +/* + * Flush all hypervisor mappings from the TLB + * This is needed after changing Xen code mappings. + * + * The caller needs to issue the necessary DSB and D-cache flushes + * before calling flush_xen_text_tlb. 
+ */ +static inline void flush_xen_text_tlb(void) +{ + asm volatile ( + "isb;" /* Ensure synchronization with previous changes to text */ + "tlbi alle2;" /* Flush hypervisor TLB */ + "ic iallu;" /* Flush I-cache */ + "dsb sy;" /* Ensure completion of TLB flush */ + "isb;" + : : : "memory"); +} + +/* + * Flush all hypervisor mappings from the data TLB. This is not + * sufficient when changing code mappings or for self modifying code. + */ +static inline void flush_xen_data_tlb(void) +{ + asm volatile ( + "dsb sy;" /* Ensure visibility of PTE writes */ + "tlbi alle2;" /* Flush hypervisor TLB */ + "dsb sy;" /* Ensure completion of TLB flush */ + "isb;" + : : : "memory"); +} + +/* + * Flush a range of VA''s hypervisor mappings from the data TLB. This is not + * sufficient when changing code mappings or for self modifying code. + */ +static inline void flush_xen_data_tlb_range_va(unsigned long va, unsigned long size) +{ + unsigned long end = va + size; + dsb(); /* Ensure preceding are visible */ + while ( va < end ) { + asm volatile("tlbi vae2, %0;" + : : "r" (va>>PAGE_SHIFT) : "memory"); + va += PAGE_SIZE; + } + dsb(); /* Ensure completion of the TLB flush */ + isb(); +} + +#endif /* __ASSEMBLY__ */ + +#endif /* __ARM_ARM64_PAGE_H__ */ + +/* + * Local variables: + * mode: C + * c-set-style: "BSD" + * c-basic-offset: 4 + * tab-width: 4 + * indent-tabs-mode: nil + * End: + */ diff --git a/xen/include/asm-arm/flushtlb.h b/xen/include/asm-arm/flushtlb.h index 210abfa..e7ce27b 100644 --- a/xen/include/asm-arm/flushtlb.h +++ b/xen/include/asm-arm/flushtlb.h @@ -1,5 +1,5 @@ -#ifndef __FLUSHTLB_H__ -#define __FLUSHTLB_H__ +#ifndef __ASM_ARM_FLUSHTLB_H__ +#define __ASM_ARM_FLUSHTLB_H__ #include <xen/cpumask.h> @@ -14,32 +14,18 @@ do { \ #define tlbflush_current_time() (0) -/* Flush local TLBs, current VMID only */ -static inline void flush_tlb_local(void) -{ - dsb(); - - WRITE_CP32((uint32_t) 0, TLBIALLIS); - - dsb(); - isb(); -} - -/* Flush local TLBs, all VMIDs, 
non-hypervisor mode */ -static inline void flush_tlb_all_local(void) -{ - dsb(); - - WRITE_CP32((uint32_t) 0, TLBIALLNSNHIS); - - dsb(); - isb(); -} +#if defined(CONFIG_ARM_32) +# include <asm/arm32/flushtlb.h> +#elif defined(CONFIG_ARM_64) +# include <asm/arm64/flushtlb.h> +#else +# error "unknown ARM variant" +#endif /* Flush specified CPUs'' TLBs */ void flush_tlb_mask(const cpumask_t *mask); -#endif /* __FLUSHTLB_H__ */ +#endif /* __ASM_ARM_FLUSHTLB_H__ */ /* * Local variables: * mode: C diff --git a/xen/include/asm-arm/page.h b/xen/include/asm-arm/page.h index e0a636f..709a508 100644 --- a/xen/include/asm-arm/page.h +++ b/xen/include/asm-arm/page.h @@ -250,6 +250,14 @@ static inline void write_pte(lpae_t *p, lpae_t pte) : : "r" (pte.bits), "r" (p) : "memory"); } +#if defined(CONFIG_ARM_32) +# include <asm/arm32/page.h> +#elif defined(CONFIG_ARM_64) +# include <asm/arm64/page.h> +#else +# error "unknown ARM variant" +#endif + /* Architectural minimum cacheline size is 4 32-bit words. */ #define MIN_CACHELINE_BYTES 16 /* Actual cacheline size on the boot CPU. */ @@ -282,65 +290,6 @@ static inline void flush_xen_dcache_va_range(void *p, unsigned long size) : : "r" (_p), "m" (*_p)); \ } while (0) - -/* - * Flush all hypervisor mappings from the TLB and branch predictor. - * This is needed after changing Xen code mappings. - * - * The caller needs to issue the necessary DSB and D-cache flushes - * before calling flush_xen_text_tlb. - */ -static inline void flush_xen_text_tlb(void) -{ - register unsigned long r0 asm ("r0"); - asm volatile ( - "isb;" /* Ensure synchronization with previous changes to text */ - STORE_CP32(0, TLBIALLH) /* Flush hypervisor TLB */ - STORE_CP32(0, ICIALLU) /* Flush I-cache */ - STORE_CP32(0, BPIALL) /* Flush branch predictor */ - "dsb;" /* Ensure completion of TLB+BP flush */ - "isb;" - : : "r" (r0) /*dummy*/ : "memory"); -} - -/* - * Flush all hypervisor mappings from the data TLB. 
This is not - * sufficient when changing code mappings or for self modifying code. - */ -static inline void flush_xen_data_tlb(void) -{ - register unsigned long r0 asm ("r0"); - asm volatile("dsb;" /* Ensure preceding are visible */ - STORE_CP32(0, TLBIALLH) - "dsb;" /* Ensure completion of the TLB flush */ - "isb;" - : : "r" (r0) /* dummy */: "memory"); -} - -/* - * Flush a range of VA''s hypervisor mappings from the data TLB. This is not - * sufficient when changing code mappings or for self modifying code. - */ -static inline void flush_xen_data_tlb_range_va(unsigned long va, unsigned long size) -{ - unsigned long end = va + size; - dsb(); /* Ensure preceding are visible */ - while ( va < end ) { - asm volatile(STORE_CP32(0, TLBIMVAH) - : : "r" (va) : "memory"); - va += PAGE_SIZE; - } - dsb(); /* Ensure completion of the TLB flush */ - isb(); -} - -/* Flush all non-hypervisor mappings from the TLB */ -static inline void flush_guest_tlb(void) -{ - register unsigned long r0 asm ("r0"); - WRITE_CP32(r0 /* dummy */, TLBIALLNSNH); -} - /* Print a walk of an arbitrary page table */ void dump_pt_walk(lpae_t *table, paddr_t addr); -- 1.7.2.5
Signed-off-by: Ian Campbell <ian.campbell@citrix.com> Acked-by: Tim Deegan <tim@xen.org> --- xen/include/asm-arm/arm32/page.h | 20 ++++++++++++++++++++ xen/include/asm-arm/arm64/page.h | 15 +++++++++++++++ xen/include/asm-arm/page.h | 20 -------------------- 3 files changed, 35 insertions(+), 20 deletions(-) diff --git a/xen/include/asm-arm/arm32/page.h b/xen/include/asm-arm/arm32/page.h index 073b8d1..a384f04 100644 --- a/xen/include/asm-arm/arm32/page.h +++ b/xen/include/asm-arm/arm32/page.h @@ -3,6 +3,26 @@ #ifndef __ASSEMBLY__ +/* Write a pagetable entry. + * + * If the table entry is changing a text mapping, it is responsibility + * of the caller to issue an ISB after write_pte. + */ +static inline void write_pte(lpae_t *p, lpae_t pte) +{ + asm volatile ( + /* Ensure any writes have completed with the old mappings. */ + "dsb;" + /* Safely write the entry (STRD is atomic on CPUs that support LPAE) */ + "strd %0, %H0, [%1];" + "dsb;" + /* Push this cacheline to the PoC so the rest of the system sees it. */ + STORE_CP32(1, DCCMVAC) + /* Ensure that the data flush is completed before proceeding */ + "dsb;" + : : "r" (pte.bits), "r" (p) : "memory"); +} + /* * Flush all hypervisor mappings from the TLB and branch predictor. * This is needed after changing Xen code mappings. diff --git a/xen/include/asm-arm/arm64/page.h b/xen/include/asm-arm/arm64/page.h index 636fb63..99b7296 100644 --- a/xen/include/asm-arm/arm64/page.h +++ b/xen/include/asm-arm/arm64/page.h @@ -3,6 +3,21 @@ #ifndef __ASSEMBLY__ +/* Write a pagetable entry */ +static inline void write_pte(lpae_t *p, lpae_t pte) +{ + asm volatile ( + /* Ensure any writes have completed with the old mappings. */ + "dsb sy;" + "str %0, [%1];" /* Write the entry */ + "dsb sy;" + /* Push this cacheline to the PoC so the rest of the system sees it. 
*/ + "dc cvac, %1;" + /* Ensure that the data flush is completed before proceeding */ + "dsb sy;" + : : "r" (pte.bits), "r" (p) : "memory"); +} + /* * Flush all hypervisor mappings from the TLB * This is needed after changing Xen code mappings. diff --git a/xen/include/asm-arm/page.h b/xen/include/asm-arm/page.h index 709a508..4e245a9 100644 --- a/xen/include/asm-arm/page.h +++ b/xen/include/asm-arm/page.h @@ -230,26 +230,6 @@ static inline lpae_t mfn_to_p2m_entry(unsigned long mfn, unsigned int mattr) return e; } -/* Write a pagetable entry. - * - * If the table entry is changing a text mapping, it is responsibility - * of the caller to issue an ISB after write_pte. - */ -static inline void write_pte(lpae_t *p, lpae_t pte) -{ - asm volatile ( - /* Ensure any writes have completed with the old mappings. */ - "dsb;" - /* Safely write the entry (STRD is atomic on CPUs that support LPAE) */ - "strd %0, %H0, [%1];" - "dsb;" - /* Push this cacheline to the PoC so the rest of the system sees it. */ - STORE_CP32(1, DCCMVAC) - /* Ensure that the data flush is completed before proceeding */ - "dsb;" - : : "r" (pte.bits), "r" (p) : "memory"); -} - #if defined(CONFIG_ARM_32) # include <asm/arm32/page.h> #elif defined(CONFIG_ARM_64) -- 1.7.2.5
Use "dsb sy" instead of bare "dsb", they mean the same on 32-bit but only the former is valid on 64-bit. Abstract the actual flush operation into a macro. Signed-off-by: Ian Campbell <ian.campbell@citrix.com> Acked-by: Tim Deegan <tim@xen.org> --- v2: revert to inline asm --- xen/include/asm-arm/arm32/page.h | 3 +++ xen/include/asm-arm/arm64/page.h | 3 +++ xen/include/asm-arm/page.h | 8 ++++---- 3 files changed, 10 insertions(+), 4 deletions(-) diff --git a/xen/include/asm-arm/arm32/page.h b/xen/include/asm-arm/arm32/page.h index a384f04..2b15c22 100644 --- a/xen/include/asm-arm/arm32/page.h +++ b/xen/include/asm-arm/arm32/page.h @@ -23,6 +23,9 @@ static inline void write_pte(lpae_t *p, lpae_t pte) : : "r" (pte.bits), "r" (p) : "memory"); } +/* Inline ASM to flush dcache on register R (may be an inline asm operand) */ +#define __flush_xen_dcache_one(R) STORE_CP32(R, DCCMVAC) + /* * Flush all hypervisor mappings from the TLB and branch predictor. * This is needed after changing Xen code mappings. diff --git a/xen/include/asm-arm/arm64/page.h b/xen/include/asm-arm/arm64/page.h index 99b7296..4911ba3 100644 --- a/xen/include/asm-arm/arm64/page.h +++ b/xen/include/asm-arm/arm64/page.h @@ -18,6 +18,9 @@ static inline void write_pte(lpae_t *p, lpae_t pte) : : "r" (pte.bits), "r" (p) : "memory"); } +/* Inline ASM to flush dcache on register R (may be an inline asm operand) */ +#define __flush_xen_dcache_one(R) "dc cvac, %" #R ";" + /* * Flush all hypervisor mappings from the TLB * This is needed after changing Xen code mappings. 
diff --git a/xen/include/asm-arm/page.h b/xen/include/asm-arm/page.h index 4e245a9..b89238b 100644 --- a/xen/include/asm-arm/page.h +++ b/xen/include/asm-arm/page.h @@ -251,7 +251,7 @@ static inline void flush_xen_dcache_va_range(void *p, unsigned long size) void *end; dsb(); /* So the CPU issues all writes to the range */ for ( end = p + size; p < end; p += cacheline_bytes ) - WRITE_CP32((uint32_t) p, DCCMVAC); + asm volatile (__flush_xen_dcache_one(0) : : "r" (p)); dsb(); /* So we know the flushes happen before continuing */ } @@ -264,9 +264,9 @@ static inline void flush_xen_dcache_va_range(void *p, unsigned long size) flush_xen_dcache_va_range(_p, sizeof(x)); \ else \ asm volatile ( \ - "dsb;" /* Finish all earlier writes */ \ - STORE_CP32(0, DCCMVAC) \ - "dsb;" /* Finish flush before continuing */ \ + "dsb sy;" /* Finish all earlier writes */ \ + __flush_xen_dcache_one(0) \ + "dsb sy;" /* Finish flush before continuing */ \ : : "r" (_p), "m" (*_p)); \ } while (0) -- 1.7.2.5
Signed-off-by: Ian Campbell <ian.campbell@citrix.com> Acked-by: Tim Deegan <tim@xen.org> --- I'm torn between unsigned long and vaddr_t... --- xen/include/asm-arm/arm32/page.h | 34 ++++++++++++++++++++++++++++++++++ xen/include/asm-arm/arm64/page.h | 35 +++++++++++++++++++++++++++++++++++ xen/include/asm-arm/page.h | 38 ++------------------------------------ xen/include/asm-arm/types.h | 4 ++++ 4 files changed, 75 insertions(+), 36 deletions(-) diff --git a/xen/include/asm-arm/arm32/page.h b/xen/include/asm-arm/arm32/page.h index 2b15c22..d295316 100644 --- a/xen/include/asm-arm/arm32/page.h +++ b/xen/include/asm-arm/arm32/page.h @@ -77,6 +77,40 @@ static inline void flush_xen_data_tlb_range_va(unsigned long va, unsigned long s isb(); } +/* Ask the MMU to translate a VA for us */ +static inline uint64_t __va_to_par(vaddr_t va) +{ + uint64_t par, tmp; + tmp = READ_CP64(PAR); + WRITE_CP32(va, ATS1HR); + isb(); /* Ensure result is available. */ + par = READ_CP64(PAR); + WRITE_CP64(tmp, PAR); + return par; +} + +/* Ask the MMU to translate a Guest VA for us */ +static inline uint64_t gva_to_ma_par(vaddr_t va) +{ + uint64_t par, tmp; + tmp = READ_CP64(PAR); + WRITE_CP32(va, ATS12NSOPR); + isb(); /* Ensure result is available. */ + par = READ_CP64(PAR); + WRITE_CP64(tmp, PAR); + return par; +} +static inline uint64_t gva_to_ipa_par(vaddr_t va) +{ + uint64_t par, tmp; + tmp = READ_CP64(PAR); + WRITE_CP32(va, ATS1CPR); + isb(); /* Ensure result is available. 
*/ + par = READ_CP64(PAR); + WRITE_CP64(tmp, PAR); + return par; +} + #endif /* __ASSEMBLY__ */ #endif /* __ARM_ARM32_PAGE_H__ */ diff --git a/xen/include/asm-arm/arm64/page.h b/xen/include/asm-arm/arm64/page.h index 4911ba3..9bf41fb 100644 --- a/xen/include/asm-arm/arm64/page.h +++ b/xen/include/asm-arm/arm64/page.h @@ -70,6 +70,41 @@ static inline void flush_xen_data_tlb_range_va(unsigned long va, unsigned long s isb(); } +/* Ask the MMU to translate a VA for us */ +static inline uint64_t __va_to_par(vaddr_t va) +{ + uint64_t par, tmp = READ_SYSREG64(PAR_EL1); + + asm volatile ("at s1e2r, %0;" : : "r" (va)); + isb(); + par = READ_SYSREG64(PAR_EL1); + WRITE_SYSREG64(tmp, PAR_EL1); + return par; +} + +/* Ask the MMU to translate a Guest VA for us */ +static inline uint64_t gva_to_ma_par(vaddr_t va) +{ + uint64_t par, tmp = READ_SYSREG64(PAR_EL1); + + asm volatile ("at s12e1r, %0;" : : "r" (va)); + isb(); + par = READ_SYSREG64(PAR_EL1); + WRITE_SYSREG64(tmp, PAR_EL1); + return par; +} + +static inline uint64_t gva_to_ipa_par(vaddr_t va) +{ + uint64_t par, tmp = READ_SYSREG64(PAR_EL1); + + asm volatile ("at s1e1r, %0;" : : "r" (va)); + isb(); + par = READ_SYSREG64(PAR_EL1); + WRITE_SYSREG64(tmp, PAR_EL1); + return par; +} + #endif /* __ASSEMBLY__ */ #endif /* __ARM_ARM64_PAGE_H__ */ diff --git a/xen/include/asm-arm/page.h b/xen/include/asm-arm/page.h index b89238b..ad52567 100644 --- a/xen/include/asm-arm/page.h +++ b/xen/include/asm-arm/page.h @@ -278,19 +278,7 @@ extern void dump_hyp_walk(uint32_t addr); /* Print a walk of the p2m for a domain for a physical address. */ extern void dump_p2m_lookup(struct domain *d, paddr_t addr); -/* Ask the MMU to translate a VA for us */ -static inline uint64_t __va_to_par(uint32_t va) -{ - uint64_t par, tmp; - tmp = READ_CP64(PAR); - WRITE_CP32(va, ATS1HR); - isb(); /* Ensure result is available. 
*/ - par = READ_CP64(PAR); - WRITE_CP64(tmp, PAR); - return par; -} - -static inline uint64_t va_to_par(uint32_t va) +static inline uint64_t va_to_par(vaddr_t va) { uint64_t par = __va_to_par(va); /* It is not OK to call this with an invalid VA */ @@ -302,29 +290,7 @@ static inline uint64_t va_to_par(uint32_t va) return par; } -/* Ask the MMU to translate a Guest VA for us */ -static inline uint64_t gva_to_ma_par(uint32_t va) -{ - uint64_t par, tmp; - tmp = READ_CP64(PAR); - WRITE_CP32(va, ATS12NSOPR); - isb(); /* Ensure result is available. */ - par = READ_CP64(PAR); - WRITE_CP64(tmp, PAR); - return par; -} -static inline uint64_t gva_to_ipa_par(uint32_t va) -{ - uint64_t par, tmp; - tmp = READ_CP64(PAR); - WRITE_CP32(va, ATS1CPR); - isb(); /* Ensure result is available. */ - par = READ_CP64(PAR); - WRITE_CP64(tmp, PAR); - return par; -} - -static inline int gva_to_ipa(uint32_t va, paddr_t *paddr) +static inline int gva_to_ipa(vaddr_t va, paddr_t *paddr) { uint64_t par = gva_to_ipa_par(va); if ( par & PAR_F ) diff --git a/xen/include/asm-arm/types.h b/xen/include/asm-arm/types.h index 07f7898..d3e16d8 100644 --- a/xen/include/asm-arm/types.h +++ b/xen/include/asm-arm/types.h @@ -36,12 +36,16 @@ typedef unsigned int u32; #if defined(CONFIG_ARM_32) typedef signed long long s64; typedef unsigned long long u64; +typedef u32 vaddr_t; +#define PRIvaddr PRIx32 typedef u64 paddr_t; #define INVALID_PADDR (~0ULL) #define PRIpaddr "016llx" #elif defined (CONFIG_ARM_64) typedef signed long s64; typedef unsigned long u64; +typedef u64 vaddr_t; +#define PRIvaddr PRIx64 typedef u64 paddr_t; #define INVALID_PADDR (~0UL) #define PRIpaddr "016lx" -- 1.7.2.5
Ian Campbell
2013-Feb-14 16:47 UTC
[PATCH V2 14/46] xen: arm64: barriers and wait for interrupts/events
Signed-off-by: Ian Campbell <ian.campbell@citrix.com> --- xen/include/asm-arm/arm32/system.h | 29 +++++++++++++++++++++++++++++ xen/include/asm-arm/arm64/system.h | 28 ++++++++++++++++++++++++++++ xen/include/asm-arm/system.h | 20 ++++++++------------ 3 files changed, 65 insertions(+), 12 deletions(-) create mode 100644 xen/include/asm-arm/arm32/system.h create mode 100644 xen/include/asm-arm/arm64/system.h diff --git a/xen/include/asm-arm/arm32/system.h b/xen/include/asm-arm/arm32/system.h new file mode 100644 index 0000000..91098a0 --- /dev/null +++ b/xen/include/asm-arm/arm32/system.h @@ -0,0 +1,29 @@ +/* Portions taken from Linux arch arm */ +#ifndef __ASM_ARM32_SYSTEM_H +#define __ASM_ARM32_SYSTEM_H + +#define sev() __asm__ __volatile__ ("sev" : : : "memory") +#define wfe() __asm__ __volatile__ ("wfe" : : : "memory") +#define wfi() __asm__ __volatile__ ("wfi" : : : "memory") + +#define isb() __asm__ __volatile__ ("isb" : : : "memory") +#define dsb() __asm__ __volatile__ ("dsb" : : : "memory") +#define dmb() __asm__ __volatile__ ("dmb" : : : "memory") + +#define mb() dsb() +#define rmb() dsb() +#define wmb() mb() + +#define smp_mb() dmb() +#define smp_rmb() dmb() +#define smp_wmb() dmb() + +#endif +/* + * Local variables: + * mode: C + * c-set-style: "BSD" + * c-basic-offset: 4 + * indent-tabs-mode: nil + * End: + */ diff --git a/xen/include/asm-arm/arm64/system.h b/xen/include/asm-arm/arm64/system.h new file mode 100644 index 0000000..33c031d --- /dev/null +++ b/xen/include/asm-arm/arm64/system.h @@ -0,0 +1,28 @@ +/* Portions taken from Linux arch arm64 */ +#ifndef __ASM_ARM64_SYSTEM_H +#define __ASM_ARM64_SYSTEM_H + +#define sev() asm volatile("sev" : : : "memory") +#define wfe() asm volatile("wfe" : : : "memory") +#define wfi() asm volatile("wfi" : : : "memory") + +#define isb() asm volatile("isb" : : : "memory") +#define dsb() asm volatile("dsb sy" : : : "memory") + +#define mb() dsb() +#define rmb() asm volatile("dsb ld" : : : "memory") +#define wmb() asm 
volatile("dsb st" : : : "memory") + +#define smp_mb() asm volatile("dmb ish" : : : "memory") +#define smp_rmb() asm volatile("dmb ishld" : : : "memory") +#define smp_wmb() asm volatile("dmb ishst" : : : "memory") + +#endif +/* + * Local variables: + * mode: C + * c-set-style: "BSD" + * c-basic-offset: 4 + * indent-tabs-mode: nil + * End: + */ diff --git a/xen/include/asm-arm/system.h b/xen/include/asm-arm/system.h index 216ef1f..8b4c97a 100644 --- a/xen/include/asm-arm/system.h +++ b/xen/include/asm-arm/system.h @@ -11,18 +11,6 @@ #define xchg(ptr,x) \ ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr)))) -#define isb() __asm__ __volatile__ ("isb" : : : "memory") -#define dsb() __asm__ __volatile__ ("dsb" : : : "memory") -#define dmb() __asm__ __volatile__ ("dmb" : : : "memory") - -#define mb() dsb() -#define rmb() dsb() -#define wmb() mb() - -#define smp_mb() dmb() -#define smp_rmb() dmb() -#define smp_wmb() dmb() - /* * This is used to ensure the compiler did actually allocate the register we * asked it for some inline assembly sequences. Apparently we can't trust @@ -33,6 +21,14 @@ */ #define __asmeq(x, y) ".ifnc " x "," y " ; .err ; .endif\n\t" +#if defined(CONFIG_ARM_32) +# include <asm/arm32/system.h> +#elif defined(CONFIG_ARM_64) +# include <asm/arm64/system.h> +#else +# error "unknown ARM variant" +#endif + extern void __bad_xchg(volatile void *, int); static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size) -- 1.7.2.5
Signed-off-by: Ian Campbell <ian.campbell@citrix.com> Acked-by: Tim Deegan <tim@xen.org> --- xen/include/asm-arm/arm32/system.h | 115 ++++++++++++++++++++++++++ xen/include/asm-arm/arm64/system.h | 155 ++++++++++++++++++++++++++++++++++++ xen/include/asm-arm/system.h | 114 -------------------------- 3 files changed, 270 insertions(+), 114 deletions(-) diff --git a/xen/include/asm-arm/arm32/system.h b/xen/include/asm-arm/arm32/system.h index 91098a0..9dbe8e3 100644 --- a/xen/include/asm-arm/arm32/system.h +++ b/xen/include/asm-arm/arm32/system.h @@ -18,6 +18,121 @@ #define smp_rmb() dmb() #define smp_wmb() dmb() +extern void __bad_xchg(volatile void *, int); + +static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size) +{ + unsigned long ret; + unsigned int tmp; + + smp_mb(); + + switch (size) { + case 1: + asm volatile("@ __xchg1\n" + "1: ldrexb %0, [%3]\n" + " strexb %1, %2, [%3]\n" + " teq %1, #0\n" + " bne 1b" + : "=&r" (ret), "=&r" (tmp) + : "r" (x), "r" (ptr) + : "memory", "cc"); + break; + case 4: + asm volatile("@ __xchg4\n" + "1: ldrex %0, [%3]\n" + " strex %1, %2, [%3]\n" + " teq %1, #0\n" + " bne 1b" + : "=&r" (ret), "=&r" (tmp) + : "r" (x), "r" (ptr) + : "memory", "cc"); + break; + default: + __bad_xchg(ptr, size), ret = 0; + break; + } + smp_mb(); + + return ret; +} + +/* + * Atomic compare and exchange. Compare OLD with MEM, if identical, + * store NEW in MEM. Return the initial value in MEM. Success is + * indicated by comparing RETURN with OLD. 
+ */ + +extern void __bad_cmpxchg(volatile void *ptr, int size); + +static always_inline unsigned long __cmpxchg( + volatile void *ptr, unsigned long old, unsigned long new, int size) +{ + unsigned long /*long*/ oldval, res; + + switch (size) { + case 1: + do { + asm volatile("@ __cmpxchg1\n" + " ldrexb %1, [%2]\n" + " mov %0, #0\n" + " teq %1, %3\n" + " strexbeq %0, %4, [%2]\n" + : "=&r" (res), "=&r" (oldval) + : "r" (ptr), "Ir" (old), "r" (new) + : "memory", "cc"); + } while (res); + break; + case 2: + do { + asm volatile("@ __cmpxchg2\n" + " ldrexh %1, [%2]\n" + " mov %0, #0\n" + " teq %1, %3\n" + " strexheq %0, %4, [%2]\n" + : "=&r" (res), "=&r" (oldval) + : "r" (ptr), "Ir" (old), "r" (new) + : "memory", "cc"); + } while (res); + break; + case 4: + do { + asm volatile("@ __cmpxchg4\n" + " ldrex %1, [%2]\n" + " mov %0, #0\n" + " teq %1, %3\n" + " strexeq %0, %4, [%2]\n" + : "=&r" (res), "=&r" (oldval) + : "r" (ptr), "Ir" (old), "r" (new) + : "memory", "cc"); + } while (res); + break; +#if 0 + case 8: + do { + asm volatile("@ __cmpxchg8\n" + " ldrexd %1, [%2]\n" + " mov %0, #0\n" + " teq %1, %3\n" + " strexdeq %0, %4, [%2]\n" + : "=&r" (res), "=&r" (oldval) + : "r" (ptr), "Ir" (old), "r" (new) + : "memory", "cc"); + } while (res); + break; +#endif + default: + __bad_cmpxchg(ptr, size); + oldval = 0; + } + + return oldval; +} + +#define cmpxchg(ptr,o,n) \ + ((__typeof__(*(ptr)))__cmpxchg((ptr),(unsigned long)(o), \ + (unsigned long)(n),sizeof(*(ptr)))) + #endif /* * Local variables: diff --git a/xen/include/asm-arm/arm64/system.h b/xen/include/asm-arm/arm64/system.h index 33c031d..6fd26f8 100644 --- a/xen/include/asm-arm/arm64/system.h +++ b/xen/include/asm-arm/arm64/system.h @@ -17,6 +17,161 @@ #define smp_rmb() asm volatile("dmb ishld" : : : "memory") #define smp_wmb() asm volatile("dmb ishst" : : : "memory") + +extern void __bad_xchg(volatile void *, int); + +static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size) +{ + unsigned long 
ret, tmp; + + switch (size) { + case 1: + asm volatile("// __xchg1\n" + "1: ldaxrb %w0, [%3]\n" + " stlxrb %w1, %w2, [%3]\n" + " cbnz %w1, 1b\n" + : "=&r" (ret), "=&r" (tmp) + : "r" (x), "r" (ptr) + : "memory", "cc"); + break; + case 2: + asm volatile("// __xchg2\n" + "1: ldaxrh %w0, [%3]\n" + " stlxrh %w1, %w2, [%3]\n" + " cbnz %w1, 1b\n" + : "=&r" (ret), "=&r" (tmp) + : "r" (x), "r" (ptr) + : "memory", "cc"); + break; + case 4: + asm volatile("// __xchg4\n" + "1: ldaxr %w0, [%3]\n" + " stlxr %w1, %w2, [%3]\n" + " cbnz %w1, 1b\n" + : "=&r" (ret), "=&r" (tmp) + : "r" (x), "r" (ptr) + : "memory", "cc"); + break; + case 8: + asm volatile("// __xchg8\n" + "1: ldaxr %0, [%3]\n" + " stlxr %w1, %2, [%3]\n" + " cbnz %w1, 1b\n" + : "=&r" (ret), "=&r" (tmp) + : "r" (x), "r" (ptr) + : "memory", "cc"); + break; + default: + __bad_xchg(ptr, size), ret = 0; + break; + } + + return ret; +} + +#define xchg(ptr,x) \ + ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr)))) + +extern void __bad_cmpxchg(volatile void *ptr, int size); + +static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old, + unsigned long new, int size) +{ + unsigned long oldval = 0, res; + + switch (size) { + case 1: + do { + asm volatile("// __cmpxchg1\n" + " ldxrb %w1, [%2]\n" + " mov %w0, #0\n" + " cmp %w1, %w3\n" + " b.ne 1f\n" + " stxrb %w0, %w4, [%2]\n" + "1:\n" + : "=&r" (res), "=&r" (oldval) + : "r" (ptr), "Ir" (old), "r" (new) + : "cc"); + } while (res); + break; + + case 2: + do { + asm volatile("// __cmpxchg2\n" + " ldxrh %w1, [%2]\n" + " mov %w0, #0\n" + " cmp %w1, %w3\n" + " b.ne 1f\n" + " stxrh %w0, %w4, [%2]\n" + "1:\n" + : "=&r" (res), "=&r" (oldval) + : "r" (ptr), "Ir" (old), "r" (new) + : "memory", "cc"); + } while (res); + break; + + case 4: + do { + asm volatile("// __cmpxchg4\n" + " ldxr %w1, [%2]\n" + " mov %w0, #0\n" + " cmp %w1, %w3\n" + " b.ne 1f\n" + " stxr %w0, %w4, [%2]\n" + "1:\n" + : "=&r" (res), "=&r" (oldval) + : "r" (ptr), "Ir" (old), "r" (new) + 
: "cc"); + } while (res); + break; + + case 8: + do { + asm volatile("// __cmpxchg8\n" + " ldxr %1, [%2]\n" + " mov %w0, #0\n" + " cmp %1, %3\n" + " b.ne 1f\n" + " stxr %w0, %4, [%2]\n" + "1:\n" + : "=&r" (res), "=&r" (oldval) + : "r" (ptr), "Ir" (old), "r" (new) + : "cc"); + } while (res); + break; + + default: + __bad_cmpxchg(ptr, size); + oldval = 0; + } + + return oldval; +} + +static inline unsigned long __cmpxchg_mb(volatile void *ptr, unsigned long old, + unsigned long new, int size) +{ + unsigned long ret; + + smp_mb(); + ret = __cmpxchg(ptr, old, new, size); + smp_mb(); + + return ret; +} + +#define cmpxchg(ptr,o,n) \ + ((__typeof__(*(ptr)))__cmpxchg_mb((ptr), \ + (unsigned long)(o), \ + (unsigned long)(n), \ + sizeof(*(ptr)))) + +#define cmpxchg_local(ptr,o,n) \ + ((__typeof__(*(ptr)))__cmpxchg((ptr), \ + (unsigned long)(o), \ + (unsigned long)(n), \ + sizeof(*(ptr)))) + #endif /* * Local variables: diff --git a/xen/include/asm-arm/system.h b/xen/include/asm-arm/system.h index 8b4c97a..e4cb99c 100644 --- a/xen/include/asm-arm/system.h +++ b/xen/include/asm-arm/system.h @@ -29,120 +29,6 @@ # error "unknown ARM variant" #endif -extern void __bad_xchg(volatile void *, int); - -static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size) -{ - unsigned long ret; - unsigned int tmp; - - smp_mb(); - - switch (size) { - case 1: - asm volatile("@ __xchg1\n" - "1: ldrexb %0, [%3]\n" - " strexb %1, %2, [%3]\n" - " teq %1, #0\n" - " bne 1b" - : "=&r" (ret), "=&r" (tmp) - : "r" (x), "r" (ptr) - : "memory", "cc"); - break; - case 4: - asm volatile("@ __xchg4\n" - "1: ldrex %0, [%3]\n" - " strex %1, %2, [%3]\n" - " teq %1, #0\n" - " bne 1b" - : "=&r" (ret), "=&r" (tmp) - : "r" (x), "r" (ptr) - : "memory", "cc"); - break; - default: - __bad_xchg(ptr, size), ret = 0; - break; - } - smp_mb(); - - return ret; -} - -/* - * Atomic compare and exchange. Compare OLD with MEM, if identical, - * store NEW in MEM. Return the initial value in MEM. 
Success is - * indicated by comparing RETURN with OLD. - */ - -extern void __bad_cmpxchg(volatile void *ptr, int size); - -static always_inline unsigned long __cmpxchg( - volatile void *ptr, unsigned long old, unsigned long new, int size) -{ - unsigned long /*long*/ oldval, res; - - switch (size) { - case 1: - do { - asm volatile("@ __cmpxchg1\n" - " ldrexb %1, [%2]\n" - " mov %0, #0\n" - " teq %1, %3\n" - " strexbeq %0, %4, [%2]\n" - : "=&r" (res), "=&r" (oldval) - : "r" (ptr), "Ir" (old), "r" (new) - : "memory", "cc"); - } while (res); - break; - case 2: - do { - asm volatile("@ __cmpxchg2\n" - " ldrexh %1, [%2]\n" - " mov %0, #0\n" - " teq %1, %3\n" - " strexheq %0, %4, [%2]\n" - : "=&r" (res), "=&r" (oldval) - : "r" (ptr), "Ir" (old), "r" (new) - : "memory", "cc"); - } while (res); - break; - case 4: - do { - asm volatile("@ __cmpxchg4\n" - " ldrex %1, [%2]\n" - " mov %0, #0\n" - " teq %1, %3\n" - " strexeq %0, %4, [%2]\n" - : "=&r" (res), "=&r" (oldval) - : "r" (ptr), "Ir" (old), "r" (new) - : "memory", "cc"); - } while (res); - break; -#if 0 - case 8: - do { - asm volatile("@ __cmpxchg8\n" - " ldrexd %1, [%2]\n" - " mov %0, #0\n" - " teq %1, %3\n" - " strexdeq %0, %4, [%2]\n" - : "=&r" (res), "=&r" (oldval) - : "r" (ptr), "Ir" (old), "r" (new) - : "memory", "cc"); - } while (res); - break; -#endif - default: - __bad_cmpxchg(ptr, size); - oldval = 0; - } - - return oldval; -} -#define cmpxchg(ptr,o,n) \ - ((__typeof__(*(ptr)))__cmpxchg((ptr),(unsigned long)(o), \ - (unsigned long)(n),sizeof(*(ptr)))) - #define local_irq_disable() asm volatile ( "cpsid i @ local_irq_disable\n" : : : "cc" ) #define local_irq_enable() asm volatile ( "cpsie i @ local_irq_enable\n" : : : "cc" ) -- 1.7.2.5
Ian Campbell
2013-Feb-14 16:47 UTC
[PATCH V2 16/46] xen: arm64: interrupt/abort mask/unmask
Signed-off-by: Ian Campbell <ian.campbell@citrix.com> Acked-by: Tim Deegan <tim@xen.org> --- xen/include/asm-arm/arm32/system.h | 44 +++++++++++++++++++++++++++++ xen/include/asm-arm/arm64/system.h | 54 ++++++++++++++++++++++++++++++++++++ xen/include/asm-arm/system.h | 44 ----------------------------- 3 files changed, 98 insertions(+), 44 deletions(-) diff --git a/xen/include/asm-arm/arm32/system.h b/xen/include/asm-arm/arm32/system.h index 9dbe8e3..ac8fcb0 100644 --- a/xen/include/asm-arm/arm32/system.h +++ b/xen/include/asm-arm/arm32/system.h @@ -133,6 +133,50 @@ static always_inline unsigned long __cmpxchg( ((__typeof__(*(ptr)))__cmpxchg((ptr),(unsigned long)(o), \ (unsigned long)(n),sizeof(*(ptr)))) +#define local_irq_disable() asm volatile ( "cpsid i @ local_irq_disable\n" : : : "cc" ) +#define local_irq_enable() asm volatile ( "cpsie i @ local_irq_enable\n" : : : "cc" ) + +#define local_save_flags(x) \ +({ \ + BUILD_BUG_ON(sizeof(x) != sizeof(long)); \ + asm volatile ( "mrs %0, cpsr @ local_save_flags\n" \ + : "=r" (x) :: "memory", "cc" ); \ +}) +#define local_irq_save(x) \ +({ \ + local_save_flags(x); \ + local_irq_disable(); \ +}) +#define local_irq_restore(x) \ +({ \ + BUILD_BUG_ON(sizeof(x) != sizeof(long)); \ + asm volatile ( \ + "msr cpsr_c, %0 @ local_irq_restore\n" \ + : \ + : "r" (flags) \ + : "memory", "cc"); \ +}) + +static inline int local_irq_is_enabled(void) +{ + unsigned long flags; + local_save_flags(flags); + return !(flags & PSR_IRQ_MASK); +} + +#define local_fiq_enable() __asm__("cpsie f @ __stf\n" : : : "memory", "cc") +#define local_fiq_disable() __asm__("cpsid f @ __clf\n" : : : "memory", "cc") + +#define local_abort_enable() __asm__("cpsie a @ __sta\n" : : : "memory", "cc") +#define local_abort_disable() __asm__("cpsid a @ __sta\n" : : : "memory", "cc") + +static inline int local_fiq_is_enabled(void) +{ + unsigned long flags; + local_save_flags(flags); + return !(flags & PSR_FIQ_MASK); +} + #endif /* * Local variables: diff --git 
a/xen/include/asm-arm/arm64/system.h b/xen/include/asm-arm/arm64/system.h index 6fd26f8..cc7b959 100644 --- a/xen/include/asm-arm/arm64/system.h +++ b/xen/include/asm-arm/arm64/system.h @@ -172,6 +172,60 @@ static inline unsigned long __cmpxchg_mb(volatile void *ptr, unsigned long old, (unsigned long)(n), \ sizeof(*(ptr)))) +/* Uses uimm4 as a bitmask to select the clearing of one or more of + * the DAIF exception mask bits: + * bit 3 selects the D mask, + * bit 2 the A mask, + * bit 1 the I mask and + * bit 0 the F mask. +*/ + +#define local_fiq_disable() asm volatile ( "msr daifset, #1\n" ::: "memory" ) +#define local_fiq_enable() asm volatile ( "msr daifclr, #1\n" ::: "memory" ) +#define local_irq_disable() asm volatile ( "msr daifset, #2\n" ::: "memory" ) +#define local_irq_enable() asm volatile ( "msr daifclr, #2\n" ::: "memory" ) +#define local_abort_disable() asm volatile ( "msr daifset, #4\n" ::: "memory" ) +#define local_abort_enable() asm volatile ( "msr daifclr, #4\n" ::: "memory" ) + +#define local_save_flags(x) \ +({ \ + BUILD_BUG_ON(sizeof(x) != sizeof(long)); \ + asm volatile( \ + "mrs %0, daif // local_save_flags\n" \ + : "=r" (x) \ + : \ + : "memory"); \ +}) + +#define local_irq_save(x) \ +({ \ + local_save_flags(x); \ + local_irq_disable(); \ +}) +#define local_irq_restore(x) \ +({ \ + BUILD_BUG_ON(sizeof(x) != sizeof(long)); \ + asm volatile ( \ + "msr daif, %0 // local_irq_restore" \ + : \ + : "r" (flags) \ + : "memory"); \ +}) + +static inline int local_irq_is_enabled(void) +{ + unsigned long flags; + local_save_flags(flags); + return !(flags & PSR_IRQ_MASK); +} + +static inline int local_fiq_is_enabled(void) +{ + unsigned long flags; + local_save_flags(flags); + return !(flags & PSR_FIQ_MASK); +} + #endif /* * Local variables: diff --git a/xen/include/asm-arm/system.h b/xen/include/asm-arm/system.h index e4cb99c..a26936b 100644 --- a/xen/include/asm-arm/system.h +++ b/xen/include/asm-arm/system.h @@ -29,50 +29,6 @@ # error "unknown ARM 
variant" #endif -#define local_irq_disable() asm volatile ( "cpsid i @ local_irq_disable\n" : : : "cc" ) -#define local_irq_enable() asm volatile ( "cpsie i @ local_irq_enable\n" : : : "cc" ) - -#define local_save_flags(x) \ -({ \ - BUILD_BUG_ON(sizeof(x) != sizeof(long)); \ - asm volatile ( "mrs %0, cpsr @ local_save_flags\n" \ - : "=r" (x) :: "memory", "cc" ); \ -}) -#define local_irq_save(x) \ -({ \ - local_save_flags(x); \ - local_irq_disable(); \ -}) -#define local_irq_restore(x) \ -({ \ - BUILD_BUG_ON(sizeof(x) != sizeof(long)); \ - asm volatile ( \ - "msr cpsr_c, %0 @ local_irq_restore\n" \ - : \ - : "r" (flags) \ - : "memory", "cc"); \ -}) - -static inline int local_irq_is_enabled(void) -{ - unsigned long flags; - local_save_flags(flags); - return !(flags & PSR_IRQ_MASK); -} - -#define local_fiq_enable() __asm__("cpsie f @ __stf\n" : : : "memory", "cc") -#define local_fiq_disable() __asm__("cpsid f @ __clf\n" : : : "memory", "cc") - -#define local_abort_enable() __asm__("cpsie a @ __sta\n" : : : "memory", "cc") -#define local_abort_disable() __asm__("cpsid a @ __sta\n" : : : "memory", "cc") - -static inline int local_fiq_is_enabled(void) -{ - unsigned long flags; - local_save_flags(flags); - return !!(flags & PSR_FIQ_MASK); -} - extern struct vcpu *__context_switch(struct vcpu *prev, struct vcpu *next); #endif -- 1.7.2.5
Signed-off-by: Ian Campbell <ian.campbell@citrix.com> Acked-by: Tim Deegan <tim@xen.org> --- xen/include/asm-arm/div64.h | 17 ++++++++++++++++- 1 files changed, 16 insertions(+), 1 deletions(-) diff --git a/xen/include/asm-arm/div64.h b/xen/include/asm-arm/div64.h index 7b00808..d5bdc76 100644 --- a/xen/include/asm-arm/div64.h +++ b/xen/include/asm-arm/div64.h @@ -21,6 +21,19 @@ * calling convention for arguments and results (beware). */ + +#if BITS_PER_LONG == 64 + +# define do_div(n,base) ({ \ + uint32_t __base = (base); \ + uint32_t __rem; \ + __rem = ((uint64_t)(n)) % __base; \ + (n) = ((uint64_t)(n)) / __base; \ + __rem; \ + }) + +#elif BITS_PER_LONG == 32 + #ifdef __ARMEB__ #define __xh "r0" #define __xl "r1" @@ -222,7 +235,9 @@ __nr; \ }) -#endif +#endif /* GCC version */ + +#endif /* BITS_PER_LONG */ #endif /* -- 1.7.2.5
Ian Campbell
2013-Feb-14 16:47 UTC
[PATCH V2 18/46] xen: arm64: start of day changes to setup.c
Signed-off-by: Ian Campbell <ian.campbell@citrix.com> Acked-by: Tim Deegan <tim@xen.org> --- v2: s/CSSELR_EL1/CCSIDR_EL1 --- xen/arch/arm/setup.c | 54 ++++++++++++++++++++++++++++-------------- xen/include/asm-arm/cpregs.h | 25 +++++++++++++++++++ 2 files changed, 61 insertions(+), 18 deletions(-) diff --git a/xen/arch/arm/setup.c b/xen/arch/arm/setup.c index 4e50b2b..c1f06c9 100644 --- a/xen/arch/arm/setup.c +++ b/xen/arch/arm/setup.c @@ -56,16 +56,34 @@ static void __init init_idle_domain(void) static void __init processor_id(void) { - printk("Processor Features: %08x %08x\n", - READ_CP32(ID_PFR0), READ_CP32(ID_PFR0)); - printk("Debug Features: %08x\n", READ_CP32(ID_DFR0)); - printk("Auxiliary Features: %08x\n", READ_CP32(ID_AFR0)); - printk("Memory Model Features: %08x %08x %08x %08x\n", - READ_CP32(ID_MMFR0), READ_CP32(ID_MMFR1), - READ_CP32(ID_MMFR2), READ_CP32(ID_MMFR3)); - printk("ISA Features: %08x %08x %08x %08x %08x %08x\n", - READ_CP32(ID_ISAR0), READ_CP32(ID_ISAR1), READ_CP32(ID_ISAR2), - READ_CP32(ID_ISAR3), READ_CP32(ID_ISAR4), READ_CP32(ID_ISAR5)); +#if defined(CONFIG_ARM_64) + printk("64-bit Processor Features: %016"PRIx64" %016"PRIx64"\n", + READ_SYSREG64(ID_AA64PFR0_EL1), READ_SYSREG64(ID_AA64PFR1_EL1)); + printk("64-bit Debug Features: %016"PRIx64" %016"PRIx64"\n", + READ_SYSREG64(ID_AA64DFR0_EL1), READ_SYSREG64(ID_AA64DFR1_EL1)); + printk("64-bit Auxiliary Features: %016"PRIx64" %016"PRIx64"\n", + READ_SYSREG64(ID_AA64AFR0_EL1), READ_SYSREG64(ID_AA64AFR1_EL1)); + printk("64-bit Memory Model Features: %016"PRIx64" %016"PRIx64"\n", + READ_SYSREG64(ID_AA64MMFR0_EL1), READ_SYSREG64(ID_AA64MMFR1_EL1)); + printk("64-bit ISA Features: %016"PRIx64" %016"PRIx64"\n", + READ_SYSREG64(ID_AA64ISAR0_EL1), READ_SYSREG64(ID_AA64ISAR1_EL1)); +#endif + /* + * On AArch64 these refer to the capabilities when running in + * AArch32 mode. 
+ */ + printk("32-bit Processor Features: %08x %08x\n", + READ_SYSREG32(ID_PFR0_EL1), READ_SYSREG32(ID_PFR1_EL1)); + printk("32-bit Debug Features: %08x\n", READ_SYSREG32(ID_DFR0_EL1)); + printk("32-bit Auxiliary Features: %08x\n", READ_SYSREG32(ID_AFR0_EL1)); + printk("32-bit Memory Model Features: %08x %08x %08x %08x\n", + READ_SYSREG32(ID_MMFR0_EL1), READ_SYSREG32(ID_MMFR1_EL1), + READ_SYSREG32(ID_MMFR2_EL1), READ_SYSREG32(ID_MMFR3_EL1)); + printk("32-bit ISA Features: %08x %08x %08x %08x %08x %08x\n", + READ_SYSREG32(ID_ISAR0_EL1), READ_SYSREG32(ID_ISAR1_EL1), + READ_SYSREG32(ID_ISAR2_EL1), READ_SYSREG32(ID_ISAR3_EL1), + READ_SYSREG32(ID_ISAR4_EL1), READ_SYSREG32(ID_ISAR5_EL1)); + } void __init discard_initial_modules(void) @@ -250,7 +268,8 @@ static void __init setup_mm(unsigned long dtb_paddr, size_t dtb_size) domheap_pages = heap_pages - xenheap_pages; - printk("Xen heap: %lu pages Dom heap: %lu pages\n", xenheap_pages, domheap_pages); + printk("Xen heap: %lu pages Dom heap: %lu pages\n", + xenheap_pages, domheap_pages); setup_xenheap_mappings((e >> PAGE_SHIFT) - xenheap_pages, xenheap_pages); @@ -320,8 +339,8 @@ void __init setup_cache(void) uint32_t ccsid; /* Read the cache size ID register for the level-0 data cache */ - WRITE_CP32(0, CSSELR); - ccsid = READ_CP32(CCSIDR); + WRITE_SYSREG32(0, CSSELR_EL1); + ccsid = READ_SYSREG32(CCSIDR_EL1); /* Low 3 bits are log2(cacheline size in words) - 2. 
*/ cacheline_bytes = 1U << (4 + (ccsid & 0x7)); @@ -368,16 +387,15 @@ void __init start_xen(unsigned long boot_phys_offset, setup_mm(fdt_paddr, fdt_size); /* Setup Hyp vector base */ - WRITE_CP32((uint32_t) hyp_traps_vector, HVBAR); - printk("Set hyp vector base to %"PRIx32" (expected %p)\n", - READ_CP32(HVBAR), hyp_traps_vector); + WRITE_SYSREG((vaddr_t)hyp_traps_vector, VBAR_EL2); + isb(); /* Setup Stage 2 address translation */ /* SH0=00, ORGN0=IRGN0=01 * SL0=01 (Level-1) * T0SZ=(1)1000 = -8 (40 bit physical addresses) */ - WRITE_CP32(0x80002558, VTCR); isb(); + WRITE_SYSREG32(0x80002558, VTCR_EL2); isb(); processor_id(); @@ -455,7 +473,7 @@ void __init start_xen(unsigned long boot_phys_offset, /* Switch on to the dynamically allocated stack for the idle vcpu * since the static one we're running on is about to be freed. */ - memcpy(idle_vcpu[0]->arch.cpu_info, get_cpu_info(), + memcpy(idle_vcpu[0]->arch.cpu_info, get_cpu_info(), sizeof(struct cpu_info)); switch_stack_and_jump(idle_vcpu[0]->arch.cpu_info, init_done); } diff --git a/xen/include/asm-arm/cpregs.h b/xen/include/asm-arm/cpregs.h index 7eaa50f..559be75 100644 --- a/xen/include/asm-arm/cpregs.h +++ b/xen/include/asm-arm/cpregs.h @@ -222,6 +222,31 @@ /* CP15 CR15: Implementation Defined Registers */ +/* Aliases of AArch64 names for use in common code when building for AArch32 */ +#ifdef CONFIG_ARM_32 +/* Alphabetically... 
*/ +#define CCSIDR_EL1 CCSIDR +#define CLIDR_EL1 CLIDR +#define CSSELR_EL1 CSSELR +#define ID_AFR0_EL1 ID_AFR0 +#define ID_DFR0_EL1 ID_DFR0 +#define ID_ISAR0_EL1 ID_ISAR0 +#define ID_ISAR1_EL1 ID_ISAR1 +#define ID_ISAR2_EL1 ID_ISAR2 +#define ID_ISAR3_EL1 ID_ISAR3 +#define ID_ISAR4_EL1 ID_ISAR4 +#define ID_ISAR5_EL1 ID_ISAR5 +#define ID_MMFR0_EL1 ID_MMFR0 +#define ID_MMFR1_EL1 ID_MMFR1 +#define ID_MMFR2_EL1 ID_MMFR2 +#define ID_MMFR3_EL1 ID_MMFR3 +#define ID_PFR0_EL1 ID_PFR0 +#define ID_PFR1_EL1 ID_PFR1 +#define VBAR_EL2 HVBAR +#define VTCR_EL2 VTCR + +#endif + #endif /* * Local variables: -- 1.7.2.5
Ian Campbell
2013-Feb-14 16:47 UTC
[PATCH V2 19/46] xen: arm64: changes to setup_pagetables and mm.c
Signed-off-by: Ian Campbell <ian.campbell@citrix.com> --- v2: Make *_table_offset return an unsigned int and adjust callers where necessary. Print "TTBR" instead of "TTBR0_EL2" when it is obvious from the ctxt. --- xen/arch/arm/arm32/head.S | 2 +- xen/arch/arm/mm.c | 46 +++++++++++++++++++++++------------------ xen/include/asm-arm/cpregs.h | 2 + xen/include/asm-arm/page.h | 10 +++++--- 4 files changed, 35 insertions(+), 25 deletions(-) diff --git a/xen/arch/arm/arm32/head.S b/xen/arch/arm/arm32/head.S index 5ec46c3..db3baa0 100644 --- a/xen/arch/arm/arm32/head.S +++ b/xen/arch/arm/arm32/head.S @@ -292,7 +292,7 @@ paging: /* Non-boot CPUs need to move on to the relocated pagetables */ mov r0, #0 - ldr r4, =boot_httbr /* VA of HTTBR value stashed by CPU 0 */ + ldr r4, =boot_ttbr /* VA of HTTBR value stashed by CPU 0 */ add r4, r4, r10 /* PA of it */ ldrd r4, r5, [r4] /* Actual value */ dsb diff --git a/xen/arch/arm/mm.c b/xen/arch/arm/mm.c index bcc109d..fa57efe 100644 --- a/xen/arch/arm/mm.c +++ b/xen/arch/arm/mm.c @@ -40,13 +40,17 @@ struct domain *dom_xen, *dom_io, *dom_cow; /* Static start-of-day pagetables that we use before the allocators are up */ +/* xen_pgtable == root of the trie (zeroeth level on 64-bit, first on 32-bit) */ lpae_t xen_pgtable[LPAE_ENTRIES] __attribute__((__aligned__(4096))); +#ifdef CONFIG_ARM_64 +lpae_t xen_first[LPAE_ENTRIES] __attribute__((__aligned__(4096))); +#endif lpae_t xen_second[LPAE_ENTRIES*4] __attribute__((__aligned__(4096*4))); lpae_t xen_fixmap[LPAE_ENTRIES] __attribute__((__aligned__(4096))); static lpae_t xen_xenmap[LPAE_ENTRIES] __attribute__((__aligned__(4096))); /* Non-boot CPUs use this to find the correct pagetables. 
*/ -uint64_t boot_httbr; +uint64_t boot_ttbr; static paddr_t phys_offset; @@ -70,24 +74,21 @@ void dump_pt_walk(lpae_t *first, paddr_t addr) if ( first_table_offset(addr) >= LPAE_ENTRIES ) return; - printk("1ST[0x%llx] = 0x%"PRIpaddr"\n", - first_table_offset(addr), + printk("1ST[0x%x] = 0x%"PRIpaddr"\n", first_table_offset(addr), first[first_table_offset(addr)].bits); if ( !first[first_table_offset(addr)].walk.valid || !first[first_table_offset(addr)].walk.table ) goto done; second = map_domain_page(first[first_table_offset(addr)].walk.base); - printk("2ND[0x%llx] = 0x%"PRIpaddr"\n", - second_table_offset(addr), + printk("2ND[0x%x] = 0x%"PRIpaddr"\n", second_table_offset(addr), second[second_table_offset(addr)].bits); if ( !second[second_table_offset(addr)].walk.valid || !second[second_table_offset(addr)].walk.table ) goto done; third = map_domain_page(second[second_table_offset(addr)].walk.base); - printk("3RD[0x%llx] = 0x%"PRIpaddr"\n", - third_table_offset(addr), + printk("3RD[0x%x] = 0x%"PRIpaddr"\n", third_table_offset(addr), third[third_table_offset(addr)].bits); done: @@ -96,14 +97,14 @@ done: } -void dump_hyp_walk(uint32_t addr) +void dump_hyp_walk(vaddr_t addr) { - uint64_t httbr = READ_CP64(HTTBR); + uint64_t ttbr = READ_SYSREG64(TTBR0_EL2); - printk("Walking Hypervisor VA 0x%08"PRIx32" via HTTBR 0x%016"PRIx64"\n", - addr, httbr); + printk("Walking Hypervisor VA 0x%"PRIvaddr" via TTBR 0x%016"PRIx64"\n", + addr, ttbr); - BUG_ON( (lpae_t *)(unsigned long)(httbr - phys_offset) != xen_pgtable ); + BUG_ON( (lpae_t *)(unsigned long)(ttbr - phys_offset) != xen_pgtable ); dump_pt_walk(xen_pgtable, addr); } @@ -132,7 +133,7 @@ void *map_domain_page(unsigned long mfn) unsigned long flags; lpae_t *map = xen_second + second_linear_offset(DOMHEAP_VIRT_START); unsigned long slot_mfn = mfn & ~LPAE_ENTRY_MASK; - uint32_t va; + vaddr_t va; lpae_t pte; int i, slot; @@ -272,26 +273,31 @@ void __init setup_pagetables(unsigned long boot_phys_offset, paddr_t xen_paddr) /* 
Update the copy of xen_pgtable to use the new paddrs */ p = (void *) xen_pgtable + dest_va - (unsigned long) _start; +#ifdef CONFIG_ARM_64 + p[0].pt.base += (phys_offset - boot_phys_offset) >> PAGE_SHIFT; + p = (void *) xen_first + dest_va - (unsigned long) _start; +#endif for ( i = 0; i < 4; i++) p[i].pt.base += (phys_offset - boot_phys_offset) >> PAGE_SHIFT; + p = (void *) xen_second + dest_va - (unsigned long) _start; if ( boot_phys_offset != 0 ) { /* Remove the old identity mapping of the boot paddr */ - unsigned long va = (unsigned long)_start + boot_phys_offset; + vaddr_t va = (vaddr_t)_start + boot_phys_offset; p[second_linear_offset(va)].bits = 0; } for ( i = 0; i < 4 * LPAE_ENTRIES; i++) if ( p[i].pt.valid ) - p[i].pt.base += (phys_offset - boot_phys_offset) >> PAGE_SHIFT; + p[i].pt.base += (phys_offset - boot_phys_offset) >> PAGE_SHIFT; /* Change pagetables to the copy in the relocated Xen */ - boot_httbr = (unsigned long) xen_pgtable + phys_offset; - flush_xen_dcache(boot_httbr); + boot_ttbr = (uintptr_t) xen_pgtable + phys_offset; + flush_xen_dcache(boot_ttbr); flush_xen_dcache_va_range((void*)dest_va, _end - _start); flush_xen_text_tlb(); - WRITE_CP64(boot_httbr, HTTBR); /* Change translation base */ + WRITE_SYSREG64(boot_ttbr, TTBR0_EL2); dsb(); /* Ensure visibility of HTTBR update */ flush_xen_text_tlb(); @@ -336,7 +342,7 @@ void __init setup_pagetables(unsigned long boot_phys_offset, paddr_t xen_paddr) /* TLBFLUSH and ISB would be needed here, but wait until we set WXN */ /* From now on, no mapping may be both writable and executable. */ - WRITE_CP32(READ_CP32(HSCTLR) | SCTLR_WXN, HSCTLR); + WRITE_SYSREG32(READ_SYSREG32(SCTLR_EL2) | SCTLR_WXN, SCTLR_EL2); /* Flush everything after setting WXN bit. */ flush_xen_text_tlb(); } @@ -345,7 +351,7 @@ void __init setup_pagetables(unsigned long boot_phys_offset, paddr_t xen_paddr) void __cpuinit mmu_init_secondary_cpu(void) { /* From now on, no mapping may be both writable and executable. 
*/ - WRITE_CP32(READ_CP32(HSCTLR) | SCTLR_WXN, HSCTLR); + WRITE_SYSREG32(READ_SYSREG32(SCTLR_EL2) | SCTLR_WXN, SCTLR_EL2); flush_xen_text_tlb(); } diff --git a/xen/include/asm-arm/cpregs.h b/xen/include/asm-arm/cpregs.h index 559be75..36da12e 100644 --- a/xen/include/asm-arm/cpregs.h +++ b/xen/include/asm-arm/cpregs.h @@ -242,6 +242,8 @@ #define ID_MMFR3_EL1 ID_MMFR3 #define ID_PFR0_EL1 ID_PFR0 #define ID_PFR1_EL1 ID_PFR1 +#define SCTLR_EL2 HSCTLR +#define TTBR0_EL2 HTTBR #define VBAR_EL2 HVBAR #define VTCR_EL2 VTCR diff --git a/xen/include/asm-arm/page.h b/xen/include/asm-arm/page.h index ad52567..11b5930 100644 --- a/xen/include/asm-arm/page.h +++ b/xen/include/asm-arm/page.h @@ -274,7 +274,7 @@ static inline void flush_xen_dcache_va_range(void *p, unsigned long size) void dump_pt_walk(lpae_t *table, paddr_t addr); /* Print a walk of the hypervisor''s page tables for a virtual addr. */ -extern void dump_hyp_walk(uint32_t addr); +extern void dump_hyp_walk(vaddr_t addr); /* Print a walk of the p2m for a domain for a physical address. */ extern void dump_p2m_lookup(struct domain *d, paddr_t addr); @@ -326,9 +326,11 @@ static inline int gva_to_ipa(vaddr_t va, paddr_t *paddr) #define first_linear_offset(va) (va >> FIRST_SHIFT) #define second_linear_offset(va) (va >> SECOND_SHIFT) #define third_linear_offset(va) (va >> THIRD_SHIFT) -#define first_table_offset(va) (first_linear_offset(va)) -#define second_table_offset(va) (second_linear_offset(va) & LPAE_ENTRY_MASK) -#define third_table_offset(va) (third_linear_offset(va) & LPAE_ENTRY_MASK) + +#define TABLE_OFFSET(offs) ((unsigned int)(offs) & LPAE_ENTRY_MASK) +#define first_table_offset(va) TABLE_OFFSET(first_linear_offset(va)) +#define second_table_offset(va) TABLE_OFFSET(second_linear_offset(va)) +#define third_table_offset(va) TABLE_OFFSET(third_linear_offset(va)) #define clear_page(page)memset((void *)(page), 0, PAGE_SIZE) -- 1.7.2.5
Ian Campbell
2013-Feb-14 16:47 UTC
[PATCH V2 20/46] xen: arm64: add to foreign struct checks
Signed-off-by: Ian Campbell <ian.campbell@citrix.com> --- .gitignore | 1 + tools/include/xen-foreign/Makefile | 5 ++++- tools/include/xen-foreign/mkheader.py | 19 +++++++++++++++++++ tools/include/xen-foreign/reference.size | 20 ++++++++++---------- tools/include/xen-foreign/structs.py | 1 + 5 files changed, 35 insertions(+), 11 deletions(-) diff --git a/.gitignore b/.gitignore index 73c5b77..2242344 100644 --- a/.gitignore +++ b/.gitignore @@ -364,6 +364,7 @@ tools/include/xen-foreign/structs.pyc tools/include/xen-foreign/x86_32.h tools/include/xen-foreign/x86_64.h tools/include/xen-foreign/arm32.h +tools/include/xen-foreign/arm64.h .git tools/misc/xen-hptool diff --git a/tools/include/xen-foreign/Makefile b/tools/include/xen-foreign/Makefile index 53cc6b4..06b844c 100644 --- a/tools/include/xen-foreign/Makefile +++ b/tools/include/xen-foreign/Makefile @@ -3,7 +3,7 @@ include $(XEN_ROOT)/tools/Rules.mk ROOT = $(XEN_ROOT)/xen/include/public -architectures := arm32 x86_32 x86_64 +architectures := arm32 arm64 x86_32 x86_64 headers := $(patsubst %, %.h, $(architectures)) .PHONY: all clean check-headers @@ -25,6 +25,9 @@ check-headers: checker arm32.h: mkheader.py structs.py $(ROOT)/arch-arm.h $(ROOT)/xen.h $(PYTHON) $< $* $@ $(filter %.h,$^) +arm64.h: mkheader.py structs.py $(ROOT)/arch-arm.h $(ROOT)/xen.h + $(PYTHON) $< $* $@ $(filter %.h,$^) + x86_32.h: mkheader.py structs.py $(ROOT)/arch-x86/xen-x86_32.h $(ROOT)/arch-x86/xen.h $(ROOT)/xen.h $(PYTHON) $< $* $@ $(filter %.h,$^) diff --git a/tools/include/xen-foreign/mkheader.py b/tools/include/xen-foreign/mkheader.py index b7c34b1..4858687 100644 --- a/tools/include/xen-foreign/mkheader.py +++ b/tools/include/xen-foreign/mkheader.py @@ -26,6 +26,22 @@ inttypes["arm32"] = { header["arm32"] = """ #define __arm___ARM32 1 """; +footer["arm32"] = """ +#undef __DECL_REG +""" + +inttypes["arm64"] = { + "unsigned long" : "__danger_unsigned_long_on_arm64", + "long" : "__danger_long_on_arm64", + "xen_pfn_t" : "uint64_t", + 
"xen_ulong_t" : "uint64_t", +}; +header["arm64"] = """ +#define __aarch64___ARM64 1 +"""; +footer["arm64"] = """ +#undef __DECL_REG +""" # x86_32 inttypes["x86_32"] = { @@ -59,6 +75,9 @@ header["x86_64"] = """ #endif #define __x86_64___X86_64 1 """; +footer["x86_64"] = """ +#undef __DECL_REG +""" ########################################################################### # main diff --git a/tools/include/xen-foreign/reference.size b/tools/include/xen-foreign/reference.size index 0e5529d..7659c64 100644 --- a/tools/include/xen-foreign/reference.size +++ b/tools/include/xen-foreign/reference.size @@ -1,13 +1,13 @@ -structs | arm32 x86_32 x86_64 +structs | arm32 arm64 x86_32 x86_64 -start_info | - 1112 1168 -trap_info | - 8 16 -cpu_user_regs | 160 68 200 -vcpu_guest_context | 180 2800 5168 -arch_vcpu_info | 0 24 16 -vcpu_time_info | 32 32 32 -vcpu_info | 48 64 64 -arch_shared_info | 0 268 280 -shared_info | 1088 2584 3368 +start_info | - - 1112 1168 +trap_info | - - 8 16 +cpu_user_regs | 160 160 68 200 +vcpu_guest_context | 180 180 2800 5168 +arch_vcpu_info | 0 0 24 16 +vcpu_time_info | 32 32 32 32 +vcpu_info | 48 48 64 64 +arch_shared_info | 0 0 268 280 +shared_info | 1088 1088 2584 3368 diff --git a/tools/include/xen-foreign/structs.py b/tools/include/xen-foreign/structs.py index 51a77c0..5aec2c5 100644 --- a/tools/include/xen-foreign/structs.py +++ b/tools/include/xen-foreign/structs.py @@ -14,6 +14,7 @@ structs = [ "start_info", "shared_info" ]; defines = [ "__arm__", + "__aarch64__", "__i386__", "__x86_64__", -- 1.7.2.5
Ian Campbell
2013-Feb-14 16:47 UTC
[PATCH V2 21/46] xen: arm: extend HSR struct definitions to 64-bit
The main change is that the 4-bit register specifiers are extended to 5 bits by taking in an adjacent SBZP bit. Also 64-bit has two other properties indicating whether or not the target register was 64-bit (x<n>) or 32-bit (w<n>) and whether the instruction has acquire/release semantics. Signed-off-by: Ian Campbell <ian.campbell@citrix.com> Acked-by: Tim Deegan <tim@xen.org> --- xen/include/asm-arm/processor.h | 20 ++++++++++++-------- 1 files changed, 12 insertions(+), 8 deletions(-) diff --git a/xen/include/asm-arm/processor.h b/xen/include/asm-arm/processor.h index 0768cd4..8183d36 100644 --- a/xen/include/asm-arm/processor.h +++ b/xen/include/asm-arm/processor.h @@ -99,11 +99,11 @@ union hsr { unsigned long ec:6; /* Exception Class */ }; + /* reg, reg0, reg1 are 4 bits on AArch32, the fifth bit is sbzp. */ struct hsr_cp32 { unsigned long read:1; /* Direction */ unsigned long crm:4; /* CRm */ - unsigned long reg:4; /* Rt */ - unsigned long sbzp:1; + unsigned long reg:5; /* Rt */ unsigned long crn:4; /* CRn */ unsigned long op1:3; /* Op1 */ unsigned long op2:3; /* Op2 */ @@ -116,10 +116,9 @@ union hsr { struct hsr_cp64 { unsigned long read:1; /* Direction */ unsigned long crm:4; /* CRm */ - unsigned long reg1:4; /* Rt1 */ - unsigned long sbzp1:1; - unsigned long reg2:4; /* Rt2 */ - unsigned long sbzp2:2; + unsigned long reg1:5; /* Rt1 */ + unsigned long reg2:5; /* Rt2 */ + unsigned long sbzp2:1; unsigned long op1:4; /* Op1 */ unsigned long cc:4; /* Condition Code */ unsigned long ccvalid:1;/* CC Valid */ @@ -133,9 +132,14 @@ union hsr { unsigned long s1ptw:1; /* */ unsigned long cache:1; /* Cache Maintenance */ unsigned long eat:1; /* External Abort Type */ +#ifdef CONFIG_ARM_32 unsigned long sbzp0:6; - unsigned long reg:4; /* Register */ - unsigned long sbzp1:1; +#else + unsigned long sbzp0:4; + unsigned long ar:1; /* Acquire Release */ + unsigned long sf:1; /* Sixty Four bit register */ +#endif + unsigned long reg:5; /* Register */ unsigned long sign:1; /* Sign
extend */ unsigned long size:2; /* Access Size */ unsigned long valid:1; /* Syndrome Valid */ -- 1.7.2.5
Signed-off-by: Ian Campbell <ian.campbell@citrix.com> Acked-by: Tim Deegan <tim@xen.org> --- xen/arch/arm/guestcopy.c | 16 +++++++++------- xen/include/asm-arm/mm.h | 6 +++--- 2 files changed, 12 insertions(+), 10 deletions(-) diff --git a/xen/arch/arm/guestcopy.c b/xen/arch/arm/guestcopy.c index 5504e19..de1a216 100644 --- a/xen/arch/arm/guestcopy.c +++ b/xen/arch/arm/guestcopy.c @@ -8,7 +8,7 @@ unsigned long raw_copy_to_guest(void *to, const void *from, unsigned len) { /* XXX needs to handle faults */ - unsigned offset = ((unsigned long)to & ~PAGE_MASK); + unsigned offset = (vaddr_t)to & ~PAGE_MASK; while ( len ) { @@ -17,7 +17,7 @@ unsigned long raw_copy_to_guest(void *to, const void *from, unsigned len) void *p; unsigned size = min(len, (unsigned)PAGE_SIZE - offset); - rc = gvirt_to_maddr((uint32_t) to, &g); + rc = gvirt_to_maddr((vaddr_t) to, &g); if ( rc ) return rc; @@ -38,7 +38,7 @@ unsigned long raw_copy_to_guest(void *to, const void *from, unsigned len) unsigned long raw_clear_guest(void *to, unsigned len) { /* XXX needs to handle faults */ - unsigned offset = ((unsigned long)to & ~PAGE_MASK); + unsigned offset = (vaddr_t)to & ~PAGE_MASK; while ( len ) { @@ -47,7 +47,7 @@ unsigned long raw_clear_guest(void *to, unsigned len) void *p; unsigned size = min(len, (unsigned)PAGE_SIZE - offset); - rc = gvirt_to_maddr((uint32_t) to, &g); + rc = gvirt_to_maddr((vaddr_t) to, &g); if ( rc ) return rc; @@ -66,19 +66,21 @@ unsigned long raw_clear_guest(void *to, unsigned len) unsigned long raw_copy_from_guest(void *to, const void __user *from, unsigned len) { + unsigned offset = (vaddr_t)from & ~PAGE_MASK; + while ( len ) { int rc; paddr_t g; void *p; - unsigned size = min(len, (unsigned)(PAGE_SIZE - ((unsigned)from & (~PAGE_MASK)))); + unsigned size = min(len, (unsigned)(PAGE_SIZE - offset)); - rc = gvirt_to_maddr((uint32_t) from & PAGE_MASK, &g); + rc = gvirt_to_maddr((vaddr_t) from & PAGE_MASK, &g); if ( rc ) return rc; p = map_domain_page(g>>PAGE_SHIFT); - p += 
((unsigned long)from & (~PAGE_MASK)); + p += ((vaddr_t)from & (~PAGE_MASK)); memcpy(to, p, size); diff --git a/xen/include/asm-arm/mm.h b/xen/include/asm-arm/mm.h index f04829d..ff838b3 100644 --- a/xen/include/asm-arm/mm.h +++ b/xen/include/asm-arm/mm.h @@ -184,8 +184,8 @@ void* early_ioremap(paddr_t start, size_t len, unsigned attributes); static inline paddr_t virt_to_maddr(const void *va) { - uint64_t par = va_to_par((uint32_t)va); - return (par & PADDR_MASK & PAGE_MASK) | ((unsigned long) va & ~PAGE_MASK); + uint64_t par = va_to_par((vaddr_t)va); + return (par & PADDR_MASK & PAGE_MASK) | ((vaddr_t) va & ~PAGE_MASK); } static inline void *maddr_to_virt(paddr_t ma) @@ -195,7 +195,7 @@ static inline void *maddr_to_virt(paddr_t ma) return (void *)(unsigned long) ma + XENHEAP_VIRT_START; } -static inline int gvirt_to_maddr(uint32_t va, paddr_t *pa) +static inline int gvirt_to_maddr(vaddr_t va, paddr_t *pa) { uint64_t par = gva_to_ma_par(va); if ( par & PAR_F ) -- 1.7.2.5
Ian Campbell
2013-Feb-14 16:47 UTC
[PATCH V2 23/46] xen: arm: add register_t type, native register size for the hypervisor
Signed-off-by: Ian Campbell <ian.campbell@citrix.com> Acked-by: Tim Deegan <tim@xen.org> but: This is mostly a matter of coding taste, so I''d like Stefano''s ack/nack here as well. --- --- xen/arch/arm/domain_build.c | 2 +- xen/arch/arm/smpboot.c | 2 +- xen/arch/arm/traps.c | 44 ++++++++++++++++++++++-------------------- xen/arch/arm/vgic.c | 18 ++++++++-------- xen/arch/arm/vpl011.c | 6 ++-- xen/arch/arm/vtimer.c | 6 ++-- xen/include/asm-arm/regs.h | 2 +- xen/include/asm-arm/types.h | 4 +++ 8 files changed, 45 insertions(+), 39 deletions(-) diff --git a/xen/arch/arm/domain_build.c b/xen/arch/arm/domain_build.c index 7403f1a..30d014a 100644 --- a/xen/arch/arm/domain_build.c +++ b/xen/arch/arm/domain_build.c @@ -268,7 +268,7 @@ static int prepare_dtb(struct domain *d, struct kernel_info *kinfo) static void dtb_load(struct kernel_info *kinfo) { - void * __user dtb_virt = (void *)(u32)kinfo->dtb_paddr; + void * __user dtb_virt = (void *)(register_t)kinfo->dtb_paddr; raw_copy_to_guest(dtb_virt, kinfo->fdt, fdt_totalsize(kinfo->fdt)); xfree(kinfo->fdt); diff --git a/xen/arch/arm/smpboot.c b/xen/arch/arm/smpboot.c index 86379b7..d8eb5d3 100644 --- a/xen/arch/arm/smpboot.c +++ b/xen/arch/arm/smpboot.c @@ -142,7 +142,7 @@ void __cpuinit start_secondary(unsigned long boot_phys_offset, set_processor_id(cpuid); /* Setup Hyp vector base */ - WRITE_CP32((uint32_t) hyp_traps_vector, HVBAR); + WRITE_CP32((register_t) hyp_traps_vector, HVBAR); mmu_init_secondary_cpu(); enable_vfp(); diff --git a/xen/arch/arm/traps.c b/xen/arch/arm/traps.c index eaf1f52..0299b33 100644 --- a/xen/arch/arm/traps.c +++ b/xen/arch/arm/traps.c @@ -68,7 +68,7 @@ static void print_xen_info(void) debug_build() ? 
''y'' : ''n'', print_tainted(taint_str)); } -uint32_t *select_user_reg(struct cpu_user_regs *regs, int reg) +register_t *select_user_reg(struct cpu_user_regs *regs, int reg) { BUG_ON( !guest_mode(regs) ); @@ -81,20 +81,20 @@ uint32_t *select_user_reg(struct cpu_user_regs *regs, int reg) switch ( reg ) { case 0 ... 7: /* Unbanked registers */ - BUILD_BUG_ON(REGOFFS(r0) + 7*sizeof(uint32_t) != REGOFFS(r7)); + BUILD_BUG_ON(REGOFFS(r0) + 7*sizeof(register_t) != REGOFFS(r7)); return ®s->r0 + reg; case 8 ... 12: /* Register banked in FIQ mode */ - BUILD_BUG_ON(REGOFFS(r8_fiq) + 4*sizeof(uint32_t) != REGOFFS(r12_fiq)); + BUILD_BUG_ON(REGOFFS(r8_fiq) + 4*sizeof(register_t) != REGOFFS(r12_fiq)); if ( fiq_mode(regs) ) return ®s->r8_fiq + reg - 8; else return ®s->r8 + reg - 8; case 13 ... 14: /* Banked SP + LR registers */ - BUILD_BUG_ON(REGOFFS(sp_fiq) + 1*sizeof(uint32_t) != REGOFFS(lr_fiq)); - BUILD_BUG_ON(REGOFFS(sp_irq) + 1*sizeof(uint32_t) != REGOFFS(lr_irq)); - BUILD_BUG_ON(REGOFFS(sp_svc) + 1*sizeof(uint32_t) != REGOFFS(lr_svc)); - BUILD_BUG_ON(REGOFFS(sp_abt) + 1*sizeof(uint32_t) != REGOFFS(lr_abt)); - BUILD_BUG_ON(REGOFFS(sp_und) + 1*sizeof(uint32_t) != REGOFFS(lr_und)); + BUILD_BUG_ON(REGOFFS(sp_fiq) + 1*sizeof(register_t) != REGOFFS(lr_fiq)); + BUILD_BUG_ON(REGOFFS(sp_irq) + 1*sizeof(register_t) != REGOFFS(lr_irq)); + BUILD_BUG_ON(REGOFFS(sp_svc) + 1*sizeof(register_t) != REGOFFS(lr_svc)); + BUILD_BUG_ON(REGOFFS(sp_abt) + 1*sizeof(register_t) != REGOFFS(lr_abt)); + BUILD_BUG_ON(REGOFFS(sp_und) + 1*sizeof(register_t) != REGOFFS(lr_und)); switch ( regs->cpsr & PSR_MODE_MASK ) { case PSR_MODE_USR: @@ -315,11 +315,11 @@ static void show_guest_stack(struct cpu_user_regs *regs) printk("GUEST STACK GOES HERE\n"); } -#define STACK_BEFORE_EXCEPTION(regs) ((uint32_t*)(regs)->sp) +#define STACK_BEFORE_EXCEPTION(regs) ((register_t*)(regs)->sp) static void show_trace(struct cpu_user_regs *regs) { - uint32_t *frame, next, addr, low, high; + register_t *frame, next, addr, low, 
high; printk("Xen call trace:\n "); @@ -327,7 +327,7 @@ static void show_trace(struct cpu_user_regs *regs) print_symbol(" %s\n ", regs->pc); /* Bounds for range of valid frame pointer. */ - low = (uint32_t)(STACK_BEFORE_EXCEPTION(regs)/* - 2*/); + low = (register_t)(STACK_BEFORE_EXCEPTION(regs)/* - 2*/); high = (low & ~(STACK_SIZE - 1)) + (STACK_SIZE - sizeof(struct cpu_info)); @@ -356,7 +356,7 @@ static void show_trace(struct cpu_user_regs *regs) break; { /* Ordinary stack frame. */ - frame = (uint32_t *)next; + frame = (register_t *)next; next = frame[-1]; addr = frame[0]; } @@ -364,7 +364,7 @@ static void show_trace(struct cpu_user_regs *regs) printk("[<%p>]", _p(addr)); print_symbol(" %s\n ", addr); - low = (uint32_t)&frame[1]; + low = (register_t)&frame[1]; } printk("\n"); @@ -372,7 +372,7 @@ static void show_trace(struct cpu_user_regs *regs) void show_stack(struct cpu_user_regs *regs) { - uint32_t *stack = STACK_BEFORE_EXCEPTION(regs), addr; + register_t *stack = STACK_BEFORE_EXCEPTION(regs), addr; int i; if ( guest_mode(regs) ) @@ -486,20 +486,22 @@ static arm_hypercall_t arm_hypercall_table[] = { static void do_debug_trap(struct cpu_user_regs *regs, unsigned int code) { - uint32_t reg, *r; + register_t *r; + uint32_t reg; uint32_t domid = current->domain->domain_id; switch ( code ) { case 0xe0 ... 
0xef: reg = code - 0xe0; r = select_user_reg(regs, reg); - printk("DOM%d: R%d = %#010"PRIx32" at %#010"PRIx32"\n", + printk("DOM%d: R%d = 0x%"PRIregister" at 0x%"PRIvaddr"\n", domid, reg, *r, regs->pc); break; case 0xfd: - printk("DOM%d: Reached %#010"PRIx32"\n", domid, regs->pc); + printk("DOM%d: Reached %"PRIvaddr"\n", domid, regs->pc); break; case 0xfe: - printk("%c", (char)(regs->r0 & 0xff)); + r = select_user_reg(regs, 0); + printk("%c", (char)(*r & 0xff)); break; case 0xff: printk("DOM%d: DEBUG\n", domid); @@ -561,7 +563,7 @@ static void do_cp15_32(struct cpu_user_regs *regs, union hsr hsr) { struct hsr_cp32 cp32 = hsr.cp32; - uint32_t *r = select_user_reg(regs, cp32.reg); + uint32_t *r = (uint32_t*)select_user_reg(regs, cp32.reg); if ( !cp32.ccvalid ) { dprintk(XENLOG_ERR, "cp_15(32): need to handle invalid condition codes\n"); @@ -607,7 +609,7 @@ static void do_cp15_32(struct cpu_user_regs *regs, BUG_ON(!vtimer_emulate(regs, hsr)); break; default: - printk("%s p15, %d, r%d, cr%d, cr%d, %d @ %#08x\n", + printk("%s p15, %d, r%d, cr%d, cr%d, %d @ 0x%"PRIregister"\n", cp32.read ? "mrc" : "mcr", cp32.op1, cp32.reg, cp32.crn, cp32.crm, cp32.op2, regs->pc); panic("unhandled 32-bit CP15 access %#x\n", hsr.bits & HSR_CP32_REGS_MASK); @@ -637,7 +639,7 @@ static void do_cp15_64(struct cpu_user_regs *regs, BUG_ON(!vtimer_emulate(regs, hsr)); break; default: - printk("%s p15, %d, r%d, r%d, cr%d @ %#08x\n", + printk("%s p15, %d, r%d, r%d, cr%d @ 0x%"PRIregister"\n", cp64.read ? 
"mrrc" : "mcrr", cp64.op1, cp64.reg1, cp64.reg2, cp64.crm, regs->pc); panic("unhandled 64-bit CP15 access %#x\n", hsr.bits & HSR_CP64_REGS_MASK); diff --git a/xen/arch/arm/vgic.c b/xen/arch/arm/vgic.c index 39b9775..57147d5 100644 --- a/xen/arch/arm/vgic.c +++ b/xen/arch/arm/vgic.c @@ -160,7 +160,7 @@ static int vgic_distr_mmio_read(struct vcpu *v, mmio_info_t *info) { struct hsr_dabt dabt = info->dabt; struct cpu_user_regs *regs = guest_cpu_user_regs(); - uint32_t *r = select_user_reg(regs, dabt.reg); + register_t *r = select_user_reg(regs, dabt.reg); struct vgic_irq_rank *rank; int offset = (int)(info->gpa - VGIC_DISTR_BASE_ADDRESS); int gicd_reg = REG(offset); @@ -372,7 +372,7 @@ static int vgic_distr_mmio_write(struct vcpu *v, mmio_info_t *info) { struct hsr_dabt dabt = info->dabt; struct cpu_user_regs *regs = guest_cpu_user_regs(); - uint32_t *r = select_user_reg(regs, dabt.reg); + register_t *r = select_user_reg(regs, dabt.reg); struct vgic_irq_rank *rank; int offset = (int)(info->gpa - VGIC_DISTR_BASE_ADDRESS); int gicd_reg = REG(offset); @@ -421,13 +421,13 @@ static int vgic_distr_mmio_write(struct vcpu *v, mmio_info_t *info) case GICD_ISPENDR ... GICD_ISPENDRN: if ( dabt.size != 0 && dabt.size != 2 ) goto bad_width; - printk("vGICD: unhandled %s write %#"PRIx32" to ISPENDR%d\n", + printk("vGICD: unhandled %s write %#"PRIregister" to ISPENDR%d\n", dabt.size ? "word" : "byte", *r, gicd_reg - GICD_ISPENDR); return 0; case GICD_ICPENDR ... GICD_ICPENDRN: if ( dabt.size != 0 && dabt.size != 2 ) goto bad_width; - printk("vGICD: unhandled %s write %#"PRIx32" to ICPENDR%d\n", + printk("vGICD: unhandled %s write %#"PRIregister" to ICPENDR%d\n", dabt.size ? 
"word" : "byte", *r, gicd_reg - GICD_ICPENDR); return 0; @@ -499,19 +499,19 @@ static int vgic_distr_mmio_write(struct vcpu *v, mmio_info_t *info) case GICD_SGIR: if ( dabt.size != 2 ) goto bad_width; - printk("vGICD: unhandled write %#"PRIx32" to ICFGR%d\n", + printk("vGICD: unhandled write %#"PRIregister" to ICFGR%d\n", *r, gicd_reg - GICD_ICFGR); return 0; case GICD_CPENDSGIR ... GICD_CPENDSGIRN: if ( dabt.size != 0 && dabt.size != 2 ) goto bad_width; - printk("vGICD: unhandled %s write %#"PRIx32" to ICPENDSGIR%d\n", + printk("vGICD: unhandled %s write %#"PRIregister" to ICPENDSGIR%d\n", dabt.size ? "word" : "byte", *r, gicd_reg - GICD_CPENDSGIR); return 0; case GICD_SPENDSGIR ... GICD_SPENDSGIRN: if ( dabt.size != 0 && dabt.size != 2 ) goto bad_width; - printk("vGICD: unhandled %s write %#"PRIx32" to ISPENDSGIR%d\n", + printk("vGICD: unhandled %s write %#"PRIregister" to ISPENDSGIR%d\n", dabt.size ? "word" : "byte", *r, gicd_reg - GICD_SPENDSGIR); return 0; @@ -537,13 +537,13 @@ static int vgic_distr_mmio_write(struct vcpu *v, mmio_info_t *info) goto write_ignore; default: - printk("vGICD: unhandled write r%d=%"PRIx32" offset %#08x\n", + printk("vGICD: unhandled write r%d=%"PRIregister" offset %#08x\n", dabt.reg, *r, offset); return 0; } bad_width: - printk("vGICD: bad write width %d r%d=%"PRIx32" offset %#08x\n", + printk("vGICD: bad write width %d r%d=%"PRIregister" offset %#08x\n", dabt.size, dabt.reg, *r, offset); domain_crash_synchronous(); return 0; diff --git a/xen/arch/arm/vpl011.c b/xen/arch/arm/vpl011.c index 7dcee90..db5094e 100644 --- a/xen/arch/arm/vpl011.c +++ b/xen/arch/arm/vpl011.c @@ -92,7 +92,7 @@ static int uart0_mmio_read(struct vcpu *v, mmio_info_t *info) { struct hsr_dabt dabt = info->dabt; struct cpu_user_regs *regs = guest_cpu_user_regs(); - uint32_t *r = select_user_reg(regs, dabt.reg); + register_t *r = select_user_reg(regs, dabt.reg); int offset = (int)(info->gpa - UART0_START); switch ( offset ) @@ -114,7 +114,7 @@ static int 
uart0_mmio_write(struct vcpu *v, mmio_info_t *info) { struct hsr_dabt dabt = info->dabt; struct cpu_user_regs *regs = guest_cpu_user_regs(); - uint32_t *r = select_user_reg(regs, dabt.reg); + register_t *r = select_user_reg(regs, dabt.reg); int offset = (int)(info->gpa - UART0_START); switch ( offset ) @@ -127,7 +127,7 @@ static int uart0_mmio_write(struct vcpu *v, mmio_info_t *info) /* Silently ignore */ return 1; default: - printk("VPL011: unhandled write r%d=%"PRIx32" offset %#08x\n", + printk("VPL011: unhandled write r%d=%"PRIregister" offset %#08x\n", dabt.reg, *r, offset); domain_crash_synchronous(); } diff --git a/xen/arch/arm/vtimer.c b/xen/arch/arm/vtimer.c index 85201b5..291b87e 100644 --- a/xen/arch/arm/vtimer.c +++ b/xen/arch/arm/vtimer.c @@ -99,7 +99,7 @@ static int vtimer_emulate_32(struct cpu_user_regs *regs, union hsr hsr) { struct vcpu *v = current; struct hsr_cp32 cp32 = hsr.cp32; - uint32_t *r = select_user_reg(regs, cp32.reg); + uint32_t *r = (uint32_t *)select_user_reg(regs, cp32.reg); s_time_t now; switch ( hsr.bits & HSR_CP32_REGS_MASK ) @@ -151,8 +151,8 @@ static int vtimer_emulate_64(struct cpu_user_regs *regs, union hsr hsr) { struct vcpu *v = current; struct hsr_cp64 cp64 = hsr.cp64; - uint32_t *r1 = select_user_reg(regs, cp64.reg1); - uint32_t *r2 = select_user_reg(regs, cp64.reg2); + uint32_t *r1 = (uint32_t *)select_user_reg(regs, cp64.reg1); + uint32_t *r2 = (uint32_t *)select_user_reg(regs, cp64.reg2); uint64_t ticks; s_time_t now; diff --git a/xen/include/asm-arm/regs.h b/xen/include/asm-arm/regs.h index 7486944..a723f92 100644 --- a/xen/include/asm-arm/regs.h +++ b/xen/include/asm-arm/regs.h @@ -34,7 +34,7 @@ * Returns a pointer to the given register value in regs, taking the * processor mode (CPSR) into account. 
*/ -extern uint32_t *select_user_reg(struct cpu_user_regs *regs, int reg); +extern register_t *select_user_reg(struct cpu_user_regs *regs, int reg); #endif /* __ARM_REGS_H__ */ /* diff --git a/xen/include/asm-arm/types.h b/xen/include/asm-arm/types.h index d3e16d8..9ca32f1 100644 --- a/xen/include/asm-arm/types.h +++ b/xen/include/asm-arm/types.h @@ -41,6 +41,8 @@ typedef u32 vaddr_t; typedef u64 paddr_t; #define INVALID_PADDR (~0ULL) #define PRIpaddr "016llx" +typedef u32 register_t; +#define PRIregister "x" #elif defined (CONFIG_ARM_64) typedef signed long s64; typedef unsigned long u64; @@ -49,6 +51,8 @@ typedef u64 vaddr_t; typedef u64 paddr_t; #define INVALID_PADDR (~0UL) #define PRIpaddr "016lx" +typedef u64 register_t; +#define PRIregister "lx" #endif typedef unsigned long size_t; -- 1.7.2.5
Ian Campbell
2013-Feb-14 16:47 UTC
[PATCH V2 24/46] xen: arm: separate guest user regs from internal guest state.
struct cpu_user_regs is currently used as both internal state (specifically at the base of the stack) and a guest/toolstack visible API (via struct vcpu_guest_context used by XEN_DOMCTL_{g,s}etvcpucontext and VCPUOP_initialise). This causes problems when we want to make the API 64-bit clean since we don't really want to change the size of the on-stack struct. So split into vcpu_guest_core_regs which is the API facing struct and keep cpu_user_regs purely internal, translate between the two. In the user API arrange for both 64- and 32-bit registers to be included in a layout which does not differ depending on toolstack architecture. Also switch to using the more formal banked register names (e.g. with the _usr suffix) for clarity. This is an ABI change. Note that the kernel doesn't currently use this data structure so it affects the tools interface only. Signed-off-by: Ian Campbell <ian.campbell@citrix.com> Acked-by: Tim Deegan <tim@xen.org> --- v2: Allow 32-bit to see 64-bit register names too, this is needed so that 32-bit toolstacks can access/control 64-bit guests. 
--- tools/include/xen-foreign/mkheader.py | 10 +++ tools/include/xen-foreign/reference.size | 5 +- tools/include/xen-foreign/structs.py | 1 + tools/libxc/xc_dom_arm.c | 10 ++-- xen/arch/arm/arm32/Makefile | 2 + xen/arch/arm/arm32/domain.c | 51 +++++++++++++ xen/arch/arm/arm64/Makefile | 2 + xen/arch/arm/arm64/domain.c | 66 +++++++++++++++++ xen/arch/arm/domain.c | 4 +- xen/arch/arm/domctl.c | 4 +- xen/include/asm-arm/arm32/processor.h | 52 +++++++++++++ xen/include/asm-arm/arm64/processor.h | 81 +++++++++++++++++++++ xen/include/asm-arm/current.h | 1 + xen/include/asm-arm/processor.h | 5 ++ xen/include/public/arch-arm.h | 115 ++++++++++++++++++------------ 15 files changed, 353 insertions(+), 56 deletions(-) create mode 100644 xen/arch/arm/arm32/domain.c create mode 100644 xen/arch/arm/arm64/domain.c diff --git a/tools/include/xen-foreign/mkheader.py b/tools/include/xen-foreign/mkheader.py index 4858687..c57b55b 100644 --- a/tools/include/xen-foreign/mkheader.py +++ b/tools/include/xen-foreign/mkheader.py @@ -25,6 +25,11 @@ inttypes["arm32"] = { }; header["arm32"] = """ #define __arm___ARM32 1 +#if defined(__GNUC__) && !defined(__STRICT_ANSI__) +# define __DECL_REG(n64, n32) union { uint64_t n64; uint32_t n32; } +#else +# define __DECL_REG(n64, n32) uint64_t n64 +#endif """; footer["arm32"] = """ #undef __DECL_REG @@ -38,6 +43,11 @@ inttypes["arm64"] = { }; header["arm64"] = """ #define __aarch64___ARM64 1 +#if defined(__GNUC__) && !defined(__STRICT_ANSI__) +# define __DECL_REG(n64, n32) union { uint64_t n64; uint32_t n32; } +#else +# define __DECL_REG(n64, n32) uint64_t n64 +#endif """; footer["arm64"] = """ #undef __DECL_REG diff --git a/tools/include/xen-foreign/reference.size b/tools/include/xen-foreign/reference.size index 7659c64..b3347b4 100644 --- a/tools/include/xen-foreign/reference.size +++ b/tools/include/xen-foreign/reference.size @@ -3,8 +3,9 @@ structs | arm32 arm64 x86_32 x86_64 start_info | - - 1112 1168 trap_info | - - 8 16 -cpu_user_regs | 160 
160 68 200 -vcpu_guest_context | 180 180 2800 5168 +cpu_user_regs | - - 68 200 +vcpu_guest_core_regs | 304 304 - - +vcpu_guest_context | 336 336 2800 5168 arch_vcpu_info | 0 0 24 16 vcpu_time_info | 32 32 32 32 vcpu_info | 48 48 64 64 diff --git a/tools/include/xen-foreign/structs.py b/tools/include/xen-foreign/structs.py index 5aec2c5..0b33a77 100644 --- a/tools/include/xen-foreign/structs.py +++ b/tools/include/xen-foreign/structs.py @@ -6,6 +6,7 @@ unions = [ "vcpu_cr_regs", structs = [ "start_info", "trap_info", "cpu_user_regs", + "vcpu_guest_core_regs", "vcpu_guest_context", "arch_vcpu_info", "vcpu_time_info", diff --git a/tools/libxc/xc_dom_arm.c b/tools/libxc/xc_dom_arm.c index 0cec774..e46cec9 100644 --- a/tools/libxc/xc_dom_arm.c +++ b/tools/libxc/xc_dom_arm.c @@ -107,17 +107,17 @@ static int vcpu_arm(struct xc_dom_image *dom, void *ptr) /* clear everything */ memset(ctxt, 0, sizeof(*ctxt)); - ctxt->user_regs.pc = dom->parms.virt_entry; + ctxt->user_regs.pc32 = dom->parms.virt_entry; /* Linux boot protocol. See linux.Documentation/arm/Booting. */ - ctxt->user_regs.r0 = 0; /* SBZ */ + ctxt->user_regs.r0_usr = 0; /* SBZ */ /* Machine ID: We use DTB therefore no machine id */ - ctxt->user_regs.r1 = 0xffffffff; + ctxt->user_regs.r1_usr = 0xffffffff; /* ATAGS/DTB: We currently require that the guest kernel to be * using CONFIG_ARM_APPENDED_DTB. Ensure that r2 does not look * like a valid pointer to a set of ATAGS or a DTB. 
*/ - ctxt->user_regs.r2 = 0xffffffff; + ctxt->user_regs.r2_usr = 0xffffffff; ctxt->sctlr = /* #define SCTLR_BASE */0x00c50078; @@ -130,7 +130,7 @@ static int vcpu_arm(struct xc_dom_image *dom, void *ptr) ctxt->flags = VGCF_online; DOMPRINTF("Initial state CPSR %#"PRIx32" PC %#"PRIx32, - ctxt->user_regs.cpsr, ctxt->user_regs.pc); + ctxt->user_regs.cpsr, ctxt->user_regs.pc32); return 0; } diff --git a/xen/arch/arm/arm32/Makefile b/xen/arch/arm/arm32/Makefile index 20931fa..29898ae 100644 --- a/xen/arch/arm/arm32/Makefile +++ b/xen/arch/arm/arm32/Makefile @@ -3,3 +3,5 @@ subdir-y += lib obj-y += entry.o obj-y += mode_switch.o obj-y += proc-ca15.o + +obj-y += domain.o diff --git a/xen/arch/arm/arm32/domain.c b/xen/arch/arm/arm32/domain.c new file mode 100644 index 0000000..f75a2c6 --- /dev/null +++ b/xen/arch/arm/arm32/domain.c @@ -0,0 +1,51 @@ +#include <xen/config.h> +#include <xen/sched.h> + +#include <asm/domain.h> +#include <asm/processor.h> + +#include <public/xen.h> + +/* C(hyp,user), hyp is Xen internal name, user is user API name. 
*/ + +#define ALLREGS \ + C(r0,r0_usr); C(r1,r1_usr); C(r2,r2_usr); C(r3,r3_usr); \ + C(r4,r4_usr); C(r5,r5_usr); C(r6,r6_usr); C(r7,r7_usr); \ + C(r8,r8_usr); C(r9,r9_usr); C(r10,r10_usr); C(r11,r11_usr); \ + C(r12,r12_usr); \ + C(sp_usr,sp_usr); \ + C(lr,lr_usr); \ + C(spsr_irq,spsr_irq); C(lr_irq,lr_irq); C(sp_irq,sp_irq); \ + C(spsr_svc,spsr_svc); C(lr_svc,lr_svc); C(sp_svc,sp_svc); \ + C(spsr_abt,spsr_abt); C(lr_abt,lr_abt); C(sp_abt,sp_abt); \ + C(spsr_und,spsr_und); C(lr_und,lr_und); C(sp_und,sp_und); \ + C(spsr_fiq,spsr_fiq); C(sp_fiq,sp_fiq); C(sp_fiq,sp_fiq); \ + C(r8_fiq,r8_fiq); C(r9_fiq,r9_fiq); \ + C(r10_fiq,r10_fiq); C(r11_fiq,r11_fiq); C(r12_fiq,r12_fiq); \ + C(pc,pc32); \ + C(cpsr,cpsr) + +void vcpu_regs_hyp_to_user(const struct vcpu *vcpu, + struct vcpu_guest_core_regs *regs) +{ +#define C(hyp,user) regs->user = vcpu->arch.cpu_info->guest_cpu_user_regs.hyp + ALLREGS; +#undef C +} + +void vcpu_regs_user_to_hyp(struct vcpu *vcpu, + const struct vcpu_guest_core_regs *regs) +{ +#define C(hyp,user) vcpu->arch.cpu_info->guest_cpu_user_regs.hyp = regs->user + ALLREGS; +#undef C +} + +/* + * Local variables: + * mode: C + * c-set-style: "BSD" + * c-basic-offset: 4 + * indent-tabs-mode: nil + * End: + */ diff --git a/xen/arch/arm/arm64/Makefile b/xen/arch/arm/arm64/Makefile index c447eaa..815f305 100644 --- a/xen/arch/arm/arm64/Makefile +++ b/xen/arch/arm/arm64/Makefile @@ -1,3 +1,5 @@ subdir-y += lib obj-y += mode_switch.o + +obj-y += domain.o diff --git a/xen/arch/arm/arm64/domain.c b/xen/arch/arm/arm64/domain.c new file mode 100644 index 0000000..05df29e --- /dev/null +++ b/xen/arch/arm/arm64/domain.c @@ -0,0 +1,66 @@ +#include <xen/config.h> +#include <xen/sched.h> + +#include <asm/domain.h> +#include <asm/processor.h> + +#include <public/xen.h> + +/* C(hyp,user), hyp is Xen internal name, user is user API name. 
*/ + +#define ALLREGS \ + C(x0,x0); C(x1,x1); C(x2,x2); C(x3,x3); \ + C(x4,x4); C(x5,x5); C(x6,x6); C(x7,x7); \ + C(x8,x8); C(x9,x9); C(x10,x10); C(x11,x11); \ + C(x12,x12); C(x13,x13); C(x14,x14); C(x15,x15); \ + C(x16,x16); C(x17,x17); C(x18,x18); C(x19,x19); \ + C(x20,x20); C(x21,x21); C(x22,x22); C(x23,x23); \ + C(x24,x24); C(x25,x25); C(x26,x26); C(x27,x27); \ + C(x28,x28); C(fp,x29); C(lr,x30); C(pc,pc64); \ + C(cpsr, cpsr); C(spsr_el1, spsr_el1) + +#define ALLREGS32 C(spsr_fiq, spsr_fiq); C(spsr_irq,spsr_irq); \ + C(spsr_und,spsr_und); C(spsr_abt,spsr_abt) + +#define ALLREGS64 C(sp_el0,sp_el0); C(sp_el1,sp_el1); C(elr_el1,elr_el1) + +void vcpu_regs_hyp_to_user(const struct vcpu *vcpu, + struct vcpu_guest_core_regs *regs) +{ +#define C(hyp,user) regs->user = vcpu->arch.cpu_info->guest_cpu_user_regs.hyp + ALLREGS; + if ( is_pv32_domain(vcpu->domain) ) + { + ALLREGS32; + } + else + { + ALLREGS64; + } +#undef C +} + +void vcpu_regs_user_to_hyp(struct vcpu *vcpu, + const struct vcpu_guest_core_regs *regs) +{ +#define C(hyp,user) vcpu->arch.cpu_info->guest_cpu_user_regs.hyp = regs->user + ALLREGS; + if ( is_pv32_domain(vcpu->domain) ) + { + ALLREGS32; + } + else + { + ALLREGS64; + } +#undef C +} + +/* + * Local variables: + * mode: C + * c-set-style: "BSD" + * c-basic-offset: 4 + * indent-tabs-mode: nil + * End: + */ diff --git a/xen/arch/arm/domain.c b/xen/arch/arm/domain.c index e7d3ec6..3651fb2 100644 --- a/xen/arch/arm/domain.c +++ b/xen/arch/arm/domain.c @@ -486,7 +486,7 @@ int arch_set_info_guest( struct vcpu *v, vcpu_guest_context_u c) { struct vcpu_guest_context *ctxt = c.nat; - struct cpu_user_regs *regs = &c.nat->user_regs; + struct vcpu_guest_core_regs *regs = &c.nat->user_regs; if ( !is_guest_psr(regs->cpsr) ) return -EINVAL; @@ -502,7 +502,7 @@ int arch_set_info_guest( if ( regs->spsr_fiq && !is_guest_psr(regs->spsr_fiq) ) return -EINVAL; - v->arch.cpu_info->guest_cpu_user_regs = *regs; + vcpu_regs_user_to_hyp(v, regs); v->arch.sctlr = ctxt->sctlr; 
v->arch.ttbr0 = ctxt->ttbr0; diff --git a/xen/arch/arm/domctl.c b/xen/arch/arm/domctl.c index c7ffd8a..15f8537 100644 --- a/xen/arch/arm/domctl.c +++ b/xen/arch/arm/domctl.c @@ -20,9 +20,9 @@ long arch_do_domctl(struct xen_domctl *domctl, struct domain *d, void arch_get_info_guest(struct vcpu *v, vcpu_guest_context_u c) { struct vcpu_guest_context *ctxt = c.nat; - struct cpu_user_regs *regs = &c.nat->user_regs; + struct vcpu_guest_core_regs *regs = &c.nat->user_regs; - *regs = v->arch.cpu_info->guest_cpu_user_regs; + vcpu_regs_hyp_to_user(v, regs); ctxt->sctlr = v->arch.sctlr; ctxt->ttbr0 = v->arch.ttbr0; diff --git a/xen/include/asm-arm/arm32/processor.h b/xen/include/asm-arm/arm32/processor.h index 843fbd2..a782d96 100644 --- a/xen/include/asm-arm/arm32/processor.h +++ b/xen/include/asm-arm/arm32/processor.h @@ -1,6 +1,58 @@ #ifndef __ASM_ARM_ARM32_PROCESSOR_H #define __ASM_ARM_ARM32_PROCESSOR_H +#ifndef __ASSEMBLY__ +/* On stack VCPU state */ +struct cpu_user_regs +{ + uint32_t r0; + uint32_t r1; + uint32_t r2; + uint32_t r3; + uint32_t r4; + uint32_t r5; + uint32_t r6; + uint32_t r7; + uint32_t r8; + uint32_t r9; + uint32_t r10; + union { + uint32_t r11; + uint32_t fp; + }; + uint32_t r12; + + uint32_t sp; /* r13 - SP: Valid for Hyp. frames only, o/w banked (see below) */ + + /* r14 - LR: is the same physical register as LR_usr */ + union { + uint32_t lr; /* r14 - LR: Valid for Hyp. Same physical register as lr_usr. */ + + uint32_t lr_usr; + }; + + uint32_t pc; /* Return IP */ + uint32_t cpsr; /* Return mode */ + uint32_t pad0; /* Doubleword-align the kernel half of the frame */ + + /* Outer guest frame only from here on... 
*/ + + uint32_t sp_usr; /* LR_usr is the same register as LR, see above */ + + uint32_t sp_irq, lr_irq; + uint32_t sp_svc, lr_svc; + uint32_t sp_abt, lr_abt; + uint32_t sp_und, lr_und; + + uint32_t r8_fiq, r9_fiq, r10_fiq, r11_fiq, r12_fiq; + uint32_t sp_fiq, lr_fiq; + + uint32_t spsr_svc, spsr_abt, spsr_und, spsr_irq, spsr_fiq; + + uint32_t pad1; /* Doubleword-align the user half of the frame */ +}; +#endif + /* Layout as used in assembly, with src/dest registers mixed in */ #define __CP32(r, coproc, opc1, crn, crm, opc2) coproc, opc1, r, crn, crm, opc2 #define __CP64(r1, r2, coproc, opc, crm) coproc, opc, r1, r2, crm diff --git a/xen/include/asm-arm/arm64/processor.h b/xen/include/asm-arm/arm64/processor.h index fdb0dab..b4602fa 100644 --- a/xen/include/asm-arm/arm64/processor.h +++ b/xen/include/asm-arm/arm64/processor.h @@ -3,6 +3,87 @@ #ifndef __ASSEMBLY__ +/* Anonymous union includes both 32- and 64-bit names (e.g., r0/x0). */ + +#define __DECL_REG(n64, n32) union { \ + uint64_t n64; \ + uint32_t n32; \ +} + +/* On stack VCPU state */ +struct cpu_user_regs +{ + /* Aarch64 Aarch32 */ + __DECL_REG(x0, r0/*_usr*/); + __DECL_REG(x1, r1/*_usr*/); + __DECL_REG(x2, r2/*_usr*/); + __DECL_REG(x3, r3/*_usr*/); + __DECL_REG(x4, r4/*_usr*/); + __DECL_REG(x5, r5/*_usr*/); + __DECL_REG(x6, r6/*_usr*/); + __DECL_REG(x7, r7/*_usr*/); + __DECL_REG(x8, r8/*_usr*/); + __DECL_REG(x9, r9/*_usr*/); + __DECL_REG(x10, r10/*_usr*/); + __DECL_REG(x11 , r11/*_usr*/); + __DECL_REG(x12, r12/*_usr*/); + + __DECL_REG(x13, /* r13_usr */ sp_usr); + __DECL_REG(x14, /* r14_usr */ lr_usr); + + __DECL_REG(x15, /* r13_hyp */ __unused_sp_hyp); + + __DECL_REG(x16, /* r14_irq */ lr_irq); + __DECL_REG(x17, /* r13_irq */ sp_irq); + + __DECL_REG(x18, /* r14_svc */ lr_svc); + __DECL_REG(x19, /* r13_svc */ sp_svc); + + __DECL_REG(x20, /* r14_abt */ lr_abt); + __DECL_REG(x21, /* r13_abt */ sp_abt); + + __DECL_REG(x22, /* r14_und */ lr_und); + __DECL_REG(x23, /* r13_und */ sp_und); + + __DECL_REG(x24, 
r8_fiq); + __DECL_REG(x25, r9_fiq); + __DECL_REG(x26, r10_fiq); + __DECL_REG(x27, r11_fiq); + __DECL_REG(x28, r12_fiq); + __DECL_REG(/* x29 */ fp, /* r13_fiq */ sp_fiq); + __DECL_REG(/* x30 */ lr, /* r14_fiq */ lr_fiq); + + register_t sp; /* Valid for hypervisor frames */ + + /* Return address and mode */ + __DECL_REG(pc, pc32); /* ELR_EL2 */ + uint32_t cpsr; /* SPSR_EL2 */ + + uint64_t pad0; + + /* Outer guest frame only from here on... */ + + union { + uint32_t spsr_el1; /* AArch64 */ + uint32_t spsr_svc; /* AArch32 */ + }; + + uint32_t pad1; /* Align */ + + /* AArch32 guests only */ + uint32_t spsr_fiq, spsr_irq, spsr_und, spsr_abt; + + /* AArch64 guests only */ + uint64_t sp_el0; + uint64_t sp_el1, elr_el1; + + uint64_t pad2; /* Doubleword-align the user half of the frame */ +}; + +#undef __DECL_REG + +/* Access to system registers */ + #define READ_SYSREG32(name) ({ \ uint32_t _r; \ asm volatile("mrs %0, "#name : "=r" (_r)); \ diff --git a/xen/include/asm-arm/current.h b/xen/include/asm-arm/current.h index d20d7a8..c9c8ac7 100644 --- a/xen/include/asm-arm/current.h +++ b/xen/include/asm-arm/current.h @@ -6,6 +6,7 @@ #include <public/xen.h> #include <asm/percpu.h> +#include <asm/processor.h> #ifndef __ASSEMBLY__ diff --git a/xen/include/asm-arm/processor.h b/xen/include/asm-arm/processor.h index 8183d36..230c901 100644 --- a/xen/include/asm-arm/processor.h +++ b/xen/include/asm-arm/processor.h @@ -253,6 +253,11 @@ void show_registers(struct cpu_user_regs *regs); #define cpu_to_core(_cpu) (0) #define cpu_to_socket(_cpu) (0) +void vcpu_regs_hyp_to_user(const struct vcpu *vcpu, + struct vcpu_guest_core_regs *regs); +void vcpu_regs_user_to_hyp(struct vcpu *vcpu, + const struct vcpu_guest_core_regs *regs); + #endif /* __ASSEMBLY__ */ #endif /* __ASM_ARM_PROCESSOR_H */ /* diff --git a/xen/include/public/arch-arm.h b/xen/include/public/arch-arm.h index dc12524..91f80d8 100644 --- a/xen/include/public/arch-arm.h +++ b/xen/include/public/arch-arm.h @@ -86,55 +86,80 @@ 
#endif #define set_xen_guest_handle(hnd, val) set_xen_guest_handle_raw(hnd, val) -struct cpu_user_regs -{ - uint32_t r0; - uint32_t r1; - uint32_t r2; - uint32_t r3; - uint32_t r4; - uint32_t r5; - uint32_t r6; - uint32_t r7; - uint32_t r8; - uint32_t r9; - uint32_t r10; - union { - uint32_t r11; - uint32_t fp; - }; - uint32_t r12; +#if defined(__GNUC__) && !defined(__STRICT_ANSI__) +/* Anonymous union includes both 32- and 64-bit names (e.g., r0/x0). */ +# define __DECL_REG(n64, n32) union { \ + uint64_t n64; \ + uint32_t n32; \ + } +#else +/* Non-gcc sources must always use the proper 64-bit name (e.g., x0). */ +#define __DECL_REG(n64, n32) uint64_t n64 +#endif - uint32_t sp; /* r13 - SP: Valid for Hyp. frames only, o/w banked (see below) */ +struct vcpu_guest_core_regs +{ + /* Aarch64 Aarch32 */ + __DECL_REG(x0, r0_usr); + __DECL_REG(x1, r1_usr); + __DECL_REG(x2, r2_usr); + __DECL_REG(x3, r3_usr); + __DECL_REG(x4, r4_usr); + __DECL_REG(x5, r5_usr); + __DECL_REG(x6, r6_usr); + __DECL_REG(x7, r7_usr); + __DECL_REG(x8, r8_usr); + __DECL_REG(x9, r9_usr); + __DECL_REG(x10, r10_usr); + __DECL_REG(x11, r11_usr); + __DECL_REG(x12, r12_usr); + + __DECL_REG(x13, sp_usr); + __DECL_REG(x14, lr_usr); + + __DECL_REG(x15, __unused_sp_hyp); + + __DECL_REG(x16, lr_irq); + __DECL_REG(x17, sp_irq); + + __DECL_REG(x18, lr_svc); + __DECL_REG(x19, sp_svc); + + __DECL_REG(x20, lr_abt); + __DECL_REG(x21, sp_abt); + + __DECL_REG(x22, lr_und); + __DECL_REG(x23, sp_und); + + __DECL_REG(x24, r8_fiq); + __DECL_REG(x25, r9_fiq); + __DECL_REG(x26, r10_fiq); + __DECL_REG(x27, r11_fiq); + __DECL_REG(x28, r12_fiq); + + __DECL_REG(x29, sp_fiq); + __DECL_REG(x30, lr_fiq); + + /* Return address and mode */ + __DECL_REG(pc64, pc32); /* ELR_EL2 */ + uint32_t cpsr; /* SPSR_EL2 */ - /* r14 - LR: is the same physical register as LR_usr */ union { - uint32_t lr; /* r14 - LR: Valid for Hyp. Same physical register as lr_usr. 
*/ - uint32_t lr_usr; + uint32_t spsr_el1; /* AArch64 */ + uint32_t spsr_svc; /* AArch32 */ }; - uint32_t pc; /* Return IP */ - uint32_t cpsr; /* Return mode */ - uint32_t pad0; /* Doubleword-align the kernel half of the frame */ - - /* Outer guest frame only from here on... */ + /* AArch32 guests only */ + uint32_t spsr_fiq, spsr_irq, spsr_und, spsr_abt; - uint32_t sp_usr; /* LR_usr is the same register as LR, see above */ - - uint32_t sp_irq, lr_irq; - uint32_t sp_svc, lr_svc; - uint32_t sp_abt, lr_abt; - uint32_t sp_und, lr_und; - - uint32_t r8_fiq, r9_fiq, r10_fiq, r11_fiq, r12_fiq; - uint32_t sp_fiq, lr_fiq; - - uint32_t spsr_svc, spsr_abt, spsr_und, spsr_irq, spsr_fiq; - - uint32_t pad1; /* Doubleword-align the user half of the frame */ + /* AArch64 guests only */ + uint64_t sp_el0; + uint64_t sp_el1, elr_el1; }; -typedef struct cpu_user_regs cpu_user_regs_t; -DEFINE_XEN_GUEST_HANDLE(cpu_user_regs_t); +typedef struct vcpu_guest_core_regs vcpu_guest_core_regs_t; +DEFINE_XEN_GUEST_HANDLE(vcpu_guest_core_regs_t); + +#undef __DECL_REG typedef uint64_t xen_pfn_t; #define PRI_xen_pfn PRIx64 @@ -151,10 +176,10 @@ struct vcpu_guest_context { #define VGCF_online (1<<_VGCF_online) uint32_t flags; /* VGCF_* */ - struct cpu_user_regs user_regs; /* User-level CPU registers */ + struct vcpu_guest_core_regs user_regs; /* Core CPU registers */ - uint32_t sctlr; - uint32_t ttbr0, ttbr1, ttbcr; + uint32_t sctlr, ttbcr; + uint64_t ttbr0, ttbr1; }; typedef struct vcpu_guest_context vcpu_guest_context_t; DEFINE_XEN_GUEST_HANDLE(vcpu_guest_context_t); -- 1.7.2.5
Ian Campbell
2013-Feb-14 16:47 UTC
[PATCH V2 25/46] xen: arm64: add guest type to domain field.
Currently 32 bit PV is the only option. Signed-off-by: Ian Campbell <ian.campbell@citrix.com> v2: Remove nested CONFIG_ARM_64 --- xen/arch/arm/kernel.c | 4 ++++ xen/arch/arm/kernel.h | 4 ++++ xen/include/asm-arm/domain.h | 16 ++++++++++++++++ 3 files changed, 24 insertions(+), 0 deletions(-) diff --git a/xen/arch/arm/kernel.c b/xen/arch/arm/kernel.c index c08c230..0c7da54 100644 --- a/xen/arch/arm/kernel.c +++ b/xen/arch/arm/kernel.c @@ -228,6 +228,10 @@ int kernel_prepare(struct kernel_info *info) if (rc < 0) rc = kernel_try_elf_prepare(info, start, size); +#ifdef CONFIG_ARM_64 + info->type = DOMAIN_PV32; /* No 64-bit guest support yet */ +#endif + return rc; } diff --git a/xen/arch/arm/kernel.h b/xen/arch/arm/kernel.h index 49fe9da..7232d34 100644 --- a/xen/arch/arm/kernel.h +++ b/xen/arch/arm/kernel.h @@ -10,6 +10,10 @@ #include <xen/device_tree.h> struct kernel_info { +#ifdef CONFIG_ARM_64 + enum domain_type type; +#endif + void *fdt; /* flat device tree */ paddr_t unassigned_mem; /* RAM not (yet) assigned to a bank */ struct dt_mem_info mem; diff --git a/xen/include/asm-arm/domain.h b/xen/include/asm-arm/domain.h index 29fe808..e9370a5 100644 --- a/xen/include/asm-arm/domain.h +++ b/xen/include/asm-arm/domain.h @@ -35,8 +35,24 @@ struct hvm_domain uint64_t params[HVM_NR_PARAMS]; } __cacheline_aligned; +#ifdef CONFIG_ARM_64 +enum domain_type { + DOMAIN_PV32, + DOMAIN_PV64, +}; +#define is_pv32_domain(d) ((d)->arch.type == DOMAIN_PV32) +#define is_pv64_domain(d) ((d)->arch.type == DOMAIN_PV64) +#else +#define is_pv32_domain(d) (1) +#define is_pv64_domain(d) (0) +#endif + struct arch_domain { +#ifdef CONFIG_ARM_64 + enum domain_type type; +#endif + struct p2m_domain p2m; struct hvm_domain hvm_domain; xen_pfn_t *grant_table_gpfn; -- 1.7.2.5
Ian Campbell
2013-Feb-14 16:47 UTC
[PATCH V2 26/46] xen: arm: move arm32 specific trap handlers to xen/arch/arm/arm32
Signed-off-by: Ian Campbell <ian.campbell@citrix.com> Acked-by: Tim Deegan <tim@xen.org> --- xen/arch/arm/arm32/Makefile | 3 +- xen/arch/arm/arm32/traps.c | 53 +++++++++++++++++++++++++++++++++++++++ xen/arch/arm/traps.c | 22 +--------------- xen/include/asm-arm/processor.h | 2 + 4 files changed, 58 insertions(+), 22 deletions(-) create mode 100644 xen/arch/arm/arm32/traps.c diff --git a/xen/arch/arm/arm32/Makefile b/xen/arch/arm/arm32/Makefile index 29898ae..1ad3364 100644 --- a/xen/arch/arm/arm32/Makefile +++ b/xen/arch/arm/arm32/Makefile @@ -4,4 +4,5 @@ obj-y += entry.o obj-y += mode_switch.o obj-y += proc-ca15.o -obj-y += domain.o +obj-y += traps.o +obj-y += domain.o \ No newline at end of file diff --git a/xen/arch/arm/arm32/traps.c b/xen/arch/arm/arm32/traps.c new file mode 100644 index 0000000..a93c2f7 --- /dev/null +++ b/xen/arch/arm/arm32/traps.c @@ -0,0 +1,53 @@ +/* + * xen/arch/arm/arm32/traps.c + * + * ARM AArch32 Specific Trap handlers + * + * Copyright (c) 2012 Citrix Systems. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#include <xen/config.h> +#include <xen/lib.h> + +#include <public/xen.h> + +#include <asm/processor.h> + +asmlinkage void do_trap_undefined_instruction(struct cpu_user_regs *regs) +{ + do_unexpected_trap("Undefined Instruction", regs); +} + +asmlinkage void do_trap_supervisor_call(struct cpu_user_regs *regs) +{ + do_unexpected_trap("Supervisor Call", regs); +} + +asmlinkage void do_trap_prefetch_abort(struct cpu_user_regs *regs) +{ + do_unexpected_trap("Prefetch Abort", regs); +} + +asmlinkage void do_trap_data_abort(struct cpu_user_regs *regs) +{ + do_unexpected_trap("Data Abort", regs); +} + +/* + * Local variables: + * mode: C + * c-set-style: "BSD" + * c-basic-offset: 4 + * indent-tabs-mode: nil + * End: + */ diff --git a/xen/arch/arm/traps.c b/xen/arch/arm/traps.c index 0299b33..cb8a8d2 100644 --- a/xen/arch/arm/traps.c +++ b/xen/arch/arm/traps.c @@ -423,33 +423,13 @@ void vcpu_show_execution_state(struct vcpu *v) vcpu_unpause(v); } -static void do_unexpected_trap(const char *msg, struct cpu_user_regs *regs) +void do_unexpected_trap(const char *msg, struct cpu_user_regs *regs) { printk("Unexpected Trap: %s\n", msg); show_execution_state(regs); while(1); } -asmlinkage void do_trap_undefined_instruction(struct cpu_user_regs *regs) -{ - do_unexpected_trap("Undefined Instruction", regs); -} - -asmlinkage void do_trap_supervisor_call(struct cpu_user_regs *regs) -{ - do_unexpected_trap("Supervisor Call", regs); -} - -asmlinkage void do_trap_prefetch_abort(struct cpu_user_regs *regs) -{ - do_unexpected_trap("Prefetch Abort", regs); -} - -asmlinkage void do_trap_data_abort(struct cpu_user_regs *regs) -{ - do_unexpected_trap("Data Abort", regs); -} - unsigned long do_arch_0(unsigned int cmd, unsigned long long value) { printk("do_arch_0 cmd=%x arg=%llx\n", cmd, value); diff --git a/xen/include/asm-arm/processor.h b/xen/include/asm-arm/processor.h index 230c901..bd473a8 100644 --- a/xen/include/asm-arm/processor.h +++ b/xen/include/asm-arm/processor.h @@ -253,6 
+253,8 @@ void show_registers(struct cpu_user_regs *regs); #define cpu_to_core(_cpu) (0) #define cpu_to_socket(_cpu) (0) +void do_unexpected_trap(const char *msg, struct cpu_user_regs *regs); + void vcpu_regs_hyp_to_user(const struct vcpu *vcpu, struct vcpu_guest_core_regs *regs); void vcpu_regs_user_to_hyp(struct vcpu *vcpu, -- 1.7.2.5
Signed-off-by: Ian Campbell <ian.campbell@citrix.com> --- v2: Call leave_hypervisor_tail on exit back to guest, disable interrupts while restoring state. --- xen/arch/arm/arm64/Makefile | 2 + xen/arch/arm/arm64/asm-offsets.c | 64 ++++++++++ xen/arch/arm/arm64/entry.S | 256 ++++++++++++++++++++++++++++++++++++++ xen/arch/arm/arm64/traps.c | 56 ++++++++ xen/arch/arm/io.h | 2 +- xen/arch/arm/setup.c | 2 +- xen/arch/arm/smpboot.c | 2 +- xen/arch/arm/traps.c | 17 ++- xen/include/asm-arm/cpregs.h | 1 + xen/include/asm-arm/processor.h | 2 +- 10 files changed, 396 insertions(+), 8 deletions(-) create mode 100644 xen/arch/arm/arm64/asm-offsets.c create mode 100644 xen/arch/arm/arm64/entry.S create mode 100644 xen/arch/arm/arm64/traps.c diff --git a/xen/arch/arm/arm64/Makefile b/xen/arch/arm/arm64/Makefile index 815f305..be41f43 100644 --- a/xen/arch/arm/arm64/Makefile +++ b/xen/arch/arm/arm64/Makefile @@ -1,5 +1,7 @@ subdir-y += lib +obj-y += entry.o obj-y += mode_switch.o +obj-y += traps.o obj-y += domain.o diff --git a/xen/arch/arm/arm64/asm-offsets.c b/xen/arch/arm/arm64/asm-offsets.c new file mode 100644 index 0000000..691d6d5 --- /dev/null +++ b/xen/arch/arm/arm64/asm-offsets.c @@ -0,0 +1,64 @@ +/* + * Generate definitions needed by assembly language modules. + * This code generates raw asm output which is post-processed + * to extract and format the required data. + */ +#define COMPILE_OFFSETS + +#include <xen/config.h> +#include <xen/types.h> +#include <xen/sched.h> +#include <public/xen.h> +#include <asm/current.h> + +#define DEFINE(_sym, _val) \ + __asm__ __volatile__ ( "\n->" #_sym " %0 " #_val : : "i" (_val) ) +#define BLANK() \ + __asm__ __volatile__ ( "\n->" : : ) +#define OFFSET(_sym, _str, _mem) \ + DEFINE(_sym, offsetof(_str, _mem)); + +/* base-2 logarithm */ +#define __L2(_x) (((_x) & 0x00000002) ? 1 : 0) +#define __L4(_x) (((_x) & 0x0000000c) ? ( 2 + __L2( (_x)>> 2)) : __L2( _x)) +#define __L8(_x) (((_x) & 0x000000f0) ? 
( 4 + __L4( (_x)>> 4)) : __L4( _x)) +#define __L16(_x) (((_x) & 0x0000ff00) ? ( 8 + __L8( (_x)>> 8)) : __L8( _x)) +#define LOG_2(_x) (((_x) & 0xffff0000) ? (16 + __L16((_x)>>16)) : __L16(_x)) + +void __dummy__(void) +{ + OFFSET(UREGS_X0, struct cpu_user_regs, x0); + OFFSET(UREGS_LR, struct cpu_user_regs, lr); + + OFFSET(UREGS_SP, struct cpu_user_regs, sp); + OFFSET(UREGS_PC, struct cpu_user_regs, pc); + OFFSET(UREGS_CPSR, struct cpu_user_regs, cpsr); + + OFFSET(UREGS_SPSR_el1, struct cpu_user_regs, spsr_el1); + + OFFSET(UREGS_SPSR_fiq, struct cpu_user_regs, spsr_fiq); + OFFSET(UREGS_SPSR_irq, struct cpu_user_regs, spsr_irq); + OFFSET(UREGS_SPSR_und, struct cpu_user_regs, spsr_und); + OFFSET(UREGS_SPSR_abt, struct cpu_user_regs, spsr_abt); + + OFFSET(UREGS_SP_el0, struct cpu_user_regs, sp_el0); + OFFSET(UREGS_SP_el1, struct cpu_user_regs, sp_el1); + OFFSET(UREGS_ELR_el1, struct cpu_user_regs, elr_el1); + + OFFSET(UREGS_kernel_sizeof, struct cpu_user_regs, cpsr); + DEFINE(UREGS_user_sizeof, sizeof(struct cpu_user_regs)); + BLANK(); + + DEFINE(CPUINFO_sizeof, sizeof(struct cpu_info)); + + OFFSET(VCPU_arch_saved_context, struct vcpu, arch.saved_context); +} + +/* + * Local variables: + * mode: C + * c-set-style: "BSD" + * c-basic-offset: 4 + * indent-tabs-mode: nil + * End: + */ diff --git a/xen/arch/arm/arm64/entry.S b/xen/arch/arm/arm64/entry.S new file mode 100644 index 0000000..1b2c4ad --- /dev/null +++ b/xen/arch/arm/arm64/entry.S @@ -0,0 +1,256 @@ +#include <xen/config.h> +#include <asm/asm_defns.h> +#include <public/xen.h> + +/* + * Register aliases. + */ +lr .req x30 // link register + +/* + * Stack pushing/popping (register pairs only). Equivalent to store decrement + * before, load increment after. + */ + .macro push, xreg1, xreg2 + stp \xreg1, \xreg2, [sp, #-16]! 
+ .endm + + .macro pop, xreg1, xreg2 + ldp \xreg1, \xreg2, [sp], #16 + .endm + +/* + * Save/restore guest mode specific state, outer stack frame + */ + .macro entry_guest, compat + + add x21, sp, #UREGS_SPSR_el1 + mrs x23, SPSR_EL1 + str x23, [x21] + + .if \compat == 0 /* Aarch64 mode */ + + add x21, sp, #UREGS_SP_el0 + mrs x22, SP_el0 + str x22, [x21] + + add x21, sp, #UREGS_ELR_el1 + mrs x22, SP_el1 + mrs x23, ELR_el1 + stp x22, x23, [x21] + + .else /* Aarch32 mode */ + + add x21, sp, #UREGS_SPSR_fiq + mrs x22, spsr_fiq + mrs x23, spsr_irq + stp w22, w23, [x21] + + add x21, sp, #UREGS_SPSR_und + mrs x22, spsr_und + mrs x23, spsr_abt + stp w22, w23, [x21] + + .endif + + .endm + +/* + * Save state on entry to hypervisor + */ + .macro entry, hyp, compat + sub sp, sp, #(UREGS_SPSR_el1 - UREGS_SP) + push x28, x29 + push x26, x27 + push x24, x25 + push x22, x23 + push x20, x21 + push x18, x19 + push x16, x17 + push x14, x15 + push x12, x13 + push x10, x11 + push x8, x9 + push x6, x7 + push x4, x5 + push x2, x3 + push x0, x1 + + .if \hyp == 1 /* Hypervisor mode */ + + add x21, sp, #(UREGS_X0 - UREGS_SP) + + .else /* Guest mode */ + + entry_guest \compat + mov x21, ~0 /* sp only valid for hyp frame XXX */ + + .endif + + stp lr, x21, [sp, #UREGS_LR] + + mrs x22, elr_el2 + mrs x23, spsr_el2 + stp x22, x23, [sp, #UREGS_PC] + + .endm + +/* + * Bad Abort numbers + *----------------- + */ +#define BAD_SYNC 0 +#define BAD_IRQ 1 +#define BAD_FIQ 2 +#define BAD_ERROR 3 + + .macro invalid, reason + mov x0, sp + mov x1, #\reason + b do_bad_mode + .endm + +hyp_sync_invalid: + entry hyp=1 + invalid BAD_SYNC + +hyp_irq_invalid: + entry hyp=1 + invalid BAD_IRQ + +hyp_fiq_invalid: + entry hyp=1 + invalid BAD_FIQ + +hyp_error_invalid: + entry hyp=1 + invalid BAD_ERROR + +/* Traps taken in Current EL with SP_ELx */ +hyp_sync: + entry hyp=1 + msr daifclr, #2 + adr lr, return_to_hypervisor + mov x0, sp + b do_trap_hypervisor + +hyp_irq: + entry hyp=1 + adr lr, return_to_hypervisor + mov x0, 
sp + b do_trap_irq + +guest_sync: + entry hyp=0, compat=0 + invalid BAD_SYNC /* No AArch64 guest support yet */ + +guest_irq: + entry hyp=0, compat=0 + invalid BAD_IRQ /* No AArch64 guest support yet */ + +guest_fiq_invalid: + entry hyp=0, compat=0 + invalid BAD_FIQ + +guest_error_invalid: + entry hyp=0, compat=0 + invalid BAD_ERROR + +guest_sync_compat: + entry hyp=0, compat=1 + msr daifclr, #2 + adr lr, return_to_guest + mov x0, sp + b do_trap_hypervisor + +guest_irq_compat: + entry hyp=0, compat=1 + adr lr, return_to_guest + mov x0, sp + b do_trap_irq + +guest_fiq_invalid_compat: + entry hyp=0, compat=1 + invalid BAD_FIQ + +guest_error_invalid_compat: + entry hyp=0, compat=1 + invalid BAD_ERROR + +ENTRY(return_to_new_vcpu) + ldr x21, [sp, #UREGS_CPSR] + and x21, x21, #PSR_MODE_MASK + /* Returning to EL2? */ + cmp x21, #PSR_MODE_EL2t + ccmp x21, #PSR_MODE_EL2h, #0x4, ne + b.eq return_to_hypervisor /* Yes */ + /* Fall thru */ +ENTRY(return_to_guest) + bl leave_hypervisor_tail /* Disables interrupts on return */ + /* Fall thru */ +ENTRY(return_to_hypervisor) + msr daifset, #2 /* Mask interrupts */ + + ldp x21, x22, [sp, #UREGS_PC] // load ELR, SPSR + + pop x0, x1 + pop x2, x3 + pop x4, x5 + pop x6, x7 + pop x8, x9 + + /* XXX handle return to guest tasks, soft irqs etc */ + + msr elr_el2, x21 // set up the return data + msr spsr_el2, x22 + + pop x10, x11 + pop x12, x13 + pop x14, x15 + pop x16, x17 + pop x18, x19 + pop x20, x21 + pop x22, x23 + pop x24, x25 + pop x26, x27 + pop x28, x29 + + ldr lr, [sp], #(UREGS_SPSR_el1 - UREGS_SP) + eret + +/* + * Exception vectors. 
+ */ + .macro ventry label + .align 7 + b \label + .endm + + .align 11 +ENTRY(hyp_traps_vector) + ventry hyp_sync_invalid // Synchronous EL2t + ventry hyp_irq_invalid // IRQ EL2t + ventry hyp_fiq_invalid // FIQ EL2t + ventry hyp_error_invalid // Error EL2t + + ventry hyp_sync // Synchronous EL2h + ventry hyp_irq // IRQ EL2h + ventry hyp_fiq_invalid // FIQ EL2h + ventry hyp_error_invalid // Error EL2h + + ventry guest_sync // Synchronous 64-bit EL0/EL1 + ventry guest_irq // IRQ 64-bit EL0/EL1 + ventry guest_fiq_invalid // FIQ 64-bit EL0/EL1 + ventry guest_error_invalid // Error 64-bit EL0/EL1 + + ventry guest_sync_compat // Synchronous 32-bit EL0/EL1 + ventry guest_irq_compat // IRQ 32-bit EL0/EL1 + ventry guest_fiq_invalid_compat // FIQ 32-bit EL0/EL1 + ventry guest_error_invalid_compat // Error 32-bit EL0/EL1 + +/* + * Local variables: + * mode: ASM + * indent-tabs-mode: nil + * End: + */ diff --git a/xen/arch/arm/arm64/traps.c b/xen/arch/arm/arm64/traps.c new file mode 100644 index 0000000..02ef992 --- /dev/null +++ b/xen/arch/arm/arm64/traps.c @@ -0,0 +1,56 @@ +/* + * xen/arch/arm/arm64/traps.c + * + * ARM AArch64 Specific Trap handlers + * + * Copyright (c) 2012 Citrix Systems. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#include <xen/config.h> +#include <xen/lib.h> + +#include <asm/system.h> +#include <asm/processor.h> + +#include <public/xen.h> + +asmlinkage void do_trap_serror(struct cpu_user_regs *regs) +{ + panic("Unhandled serror trap\n"); +} + +static const char *handler[]= { + "Synchronous Abort", + "IRQ", + "FIQ", + "Error" +}; + +asmlinkage void do_bad_mode(struct cpu_user_regs *regs, int reason) +{ + uint64_t esr = READ_SYSREG64(ESR_EL2); + printk("Bad mode in %s handler detected, code 0x%08"PRIx64"\n", + handler[reason], esr); + + local_irq_disable(); + panic("bad mode"); +} + +/* + * Local variables: + * mode: C + * c-set-style: "BSD" + * c-basic-offset: 4 + * indent-tabs-mode: nil + * End: + */ diff --git a/xen/arch/arm/io.h b/xen/arch/arm/io.h index 0933aa8..883afd8 100644 --- a/xen/arch/arm/io.h +++ b/xen/arch/arm/io.h @@ -26,7 +26,7 @@ typedef struct { struct hsr_dabt dabt; - uint32_t gva; + vaddr_t gva; paddr_t gpa; } mmio_info_t; diff --git a/xen/arch/arm/setup.c b/xen/arch/arm/setup.c index c1f06c9..299848e 100644 --- a/xen/arch/arm/setup.c +++ b/xen/arch/arm/setup.c @@ -387,7 +387,7 @@ void __init start_xen(unsigned long boot_phys_offset, setup_mm(fdt_paddr, fdt_size); /* Setup Hyp vector base */ - WRITE_SYSREG((vaddr_t)hyp_traps_vector, VBAR_EL2); + WRITE_SYSREG((vaddr_t)&hyp_traps_vector, VBAR_EL2); isb(); /* Setup Stage 2 address translation */ diff --git a/xen/arch/arm/smpboot.c b/xen/arch/arm/smpboot.c index d8eb5d3..b18f137 100644 --- a/xen/arch/arm/smpboot.c +++ b/xen/arch/arm/smpboot.c @@ -142,7 +142,7 @@ void __cpuinit start_secondary(unsigned long boot_phys_offset, set_processor_id(cpuid); /* Setup Hyp vector base */ - WRITE_CP32((register_t) hyp_traps_vector, HVBAR); + WRITE_SYSREG((vaddr_t)&hyp_traps_vector, VBAR_EL2); mmu_init_secondary_cpu(); enable_vfp(); diff --git a/xen/arch/arm/traps.c b/xen/arch/arm/traps.c index cb8a8d2..d6bdaa7 100644 --- a/xen/arch/arm/traps.c +++ b/xen/arch/arm/traps.c @@ -628,7 +628,7 @@ static void 
do_cp15_64(struct cpu_user_regs *regs, } -void dump_guest_s1_walk(struct domain *d, uint32_t addr) +void dump_guest_s1_walk(struct domain *d, vaddr_t addr) { uint32_t ttbcr = READ_CP32(TTBCR); uint32_t ttbr0 = READ_CP32(TTBR0); @@ -636,7 +636,7 @@ void dump_guest_s1_walk(struct domain *d, uint32_t addr) uint32_t offset; uint32_t *first = NULL, *second = NULL; - printk("dom%d VA 0x%08"PRIx32"\n", d->domain_id, addr); + printk("dom%d VA 0x%08"PRIvaddr"\n", d->domain_id, addr); printk(" TTBCR: 0x%08"PRIx32"\n", ttbcr); printk(" TTBR0: 0x%08"PRIx32" = 0x%"PRIpaddr"\n", ttbr0, p2m_lookup(d, ttbr0 & PAGE_MASK)); @@ -692,7 +692,11 @@ static void do_trap_data_abort_guest(struct cpu_user_regs *regs, mmio_info_t info; info.dabt = dabt; +#ifdef CONFIG_ARM_32 info.gva = READ_CP32(HDFAR); +#else + info.gva = READ_SYSREG64(FAR_EL2); +#endif if (dabt.s1ptw) goto bad_data_abort; @@ -713,7 +717,7 @@ bad_data_abort: /* XXX inject a suitable fault into the guest */ printk("Guest data abort: %s%s%s\n" - " gva=%"PRIx32"\n", + " gva=%"PRIvaddr"\n", msg, dabt.s1ptw ? " S2 during S1" : "", fsc_level_str(level), info.gva); @@ -736,13 +740,17 @@ bad_data_abort: asmlinkage void do_trap_hypervisor(struct cpu_user_regs *regs) { - union hsr hsr = { .bits = READ_CP32(HSR) }; + union hsr hsr = { .bits = READ_SYSREG32(ESR_EL2) }; switch (hsr.ec) { case HSR_EC_CP15_32: + if ( ! is_pv32_domain(current->domain) ) + goto bad_trap; do_cp15_32(regs, hsr); break; case HSR_EC_CP15_64: + if ( ! is_pv32_domain(current->domain) ) + goto bad_trap; do_cp15_64(regs, hsr); break; case HSR_EC_HVC: @@ -754,6 +762,7 @@ asmlinkage void do_trap_hypervisor(struct cpu_user_regs *regs) do_trap_data_abort_guest(regs, hsr.dabt); break; default: + bad_trap: printk("Hypervisor Trap. 
HSR=0x%x EC=0x%x IL=%x Syndrome=%"PRIx32"\n", hsr.bits, hsr.ec, hsr.len, hsr.iss); do_unexpected_trap("Hypervisor", regs); diff --git a/xen/include/asm-arm/cpregs.h b/xen/include/asm-arm/cpregs.h index 36da12e..75b6287 100644 --- a/xen/include/asm-arm/cpregs.h +++ b/xen/include/asm-arm/cpregs.h @@ -228,6 +228,7 @@ #define CCSIDR_EL1 CCSIDR #define CLIDR_EL1 CLIDR #define CSSELR_EL1 CSSELR +#define ESR_EL2 HSR #define ID_AFR0_EL1 ID_AFR0 #define ID_DFR0_EL1 ID_DFR0 #define ID_ISAR0_EL1 ID_ISAR0 diff --git a/xen/include/asm-arm/processor.h b/xen/include/asm-arm/processor.h index bd473a8..396ec41 100644 --- a/xen/include/asm-arm/processor.h +++ b/xen/include/asm-arm/processor.h @@ -238,7 +238,7 @@ union hsr { #endif #ifndef __ASSEMBLY__ -extern uint32_t hyp_traps_vector[8]; +extern uint32_t hyp_traps_vector; void panic_PAR(uint64_t par); -- 1.7.2.5
Signed-off-by: Ian Campbell <ian.campbell@citrix.com> Acked-by: Tim Deegan <tim@xen.org> --- xen/arch/arm/arm64/entry.S | 30 ++++++++++++++++++++++++++++++ xen/arch/arm/domain.c | 4 ++-- xen/include/asm-arm/domain.h | 33 +++++++++++++++++++++++---------- 3 files changed, 55 insertions(+), 12 deletions(-) diff --git a/xen/arch/arm/arm64/entry.S b/xen/arch/arm/arm64/entry.S index 1b2c4ad..a09dfcb 100644 --- a/xen/arch/arm/arm64/entry.S +++ b/xen/arch/arm/arm64/entry.S @@ -249,6 +249,36 @@ ENTRY(hyp_traps_vector) ventry guest_error_invalid_compat // Error 32-bit EL0/EL1 /* + * struct vcpu *__context_switch(struct vcpu *prev, struct vcpu *next) + * + * x0 - prev + * x1 - next + * + * Returns prev in x0 + */ +ENTRY(__context_switch) + add x8, x0, #VCPU_arch_saved_context + mov x9, sp + stp x19, x20, [x8], #16 // store callee-saved registers + stp x21, x22, [x8], #16 + stp x23, x24, [x8], #16 + stp x25, x26, [x8], #16 + stp x27, x28, [x8], #16 + stp x29, x9, [x8], #16 + str lr, [x8] + + add x8, x1, #VCPU_arch_saved_context + ldp x19, x20, [x8], #16 // restore callee-saved registers + ldp x21, x22, [x8], #16 + ldp x23, x24, [x8], #16 + ldp x25, x26, [x8], #16 + ldp x27, x28, [x8], #16 + ldp x29, x9, [x8], #16 + ldr lr, [x8] + mov sp, x9 + ret + +/* * Local variables: * mode: ASM * indent-tabs-mode: nil diff --git a/xen/arch/arm/domain.c b/xen/arch/arm/domain.c index 3651fb2..f74caf4 100644 --- a/xen/arch/arm/domain.c +++ b/xen/arch/arm/domain.c @@ -387,8 +387,8 @@ int vcpu_initialise(struct vcpu *v) - sizeof(struct cpu_info)); memset(&v->arch.saved_context, 0, sizeof(v->arch.saved_context)); - v->arch.saved_context.sp = (uint32_t)v->arch.cpu_info; - v->arch.saved_context.pc = (uint32_t)continue_new_vcpu; + v->arch.saved_context.sp = (register_t)v->arch.cpu_info; + v->arch.saved_context.pc = (register_t)continue_new_vcpu; /* Idle VCPUs don''t need the rest of this setup */ if ( is_idle_vcpu(v) ) diff --git a/xen/include/asm-arm/domain.h b/xen/include/asm-arm/domain.h index 
e9370a5..ff6214b 100644 --- a/xen/include/asm-arm/domain.h +++ b/xen/include/asm-arm/domain.h @@ -99,16 +99,29 @@ struct vtimer { struct arch_vcpu { struct { - uint32_t r4; - uint32_t r5; - uint32_t r6; - uint32_t r7; - uint32_t r8; - uint32_t r9; - uint32_t sl; - uint32_t fp; - uint32_t sp; - uint32_t pc; +#ifdef CONFIG_ARM_32 + register_t r4; + register_t r5; + register_t r6; + register_t r7; + register_t r8; + register_t r9; + register_t sl; +#else + register_t x19; + register_t x20; + register_t x21; + register_t x22; + register_t x23; + register_t x24; + register_t x25; + register_t x26; + register_t x27; + register_t x28; +#endif + register_t fp; + register_t sp; + register_t pc; } saved_context; void *stack; -- 1.7.2.5
Signed-off-by: Ian Campbell <ian.campbell@citrix.com> Acked-by: Tim Deegan <tim@xen.org> --- xen/include/asm-arm/cpregs.h | 1 + xen/include/asm-arm/percpu.h | 5 ++--- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/xen/include/asm-arm/cpregs.h b/xen/include/asm-arm/cpregs.h index 75b6287..dc69a06 100644 --- a/xen/include/asm-arm/cpregs.h +++ b/xen/include/asm-arm/cpregs.h @@ -244,6 +244,7 @@ #define ID_PFR0_EL1 ID_PFR0 #define ID_PFR1_EL1 ID_PFR1 #define SCTLR_EL2 HSCTLR +#define TPIDR_EL2 HTPIDR #define TTBR0_EL2 HTTBR #define VBAR_EL2 HVBAR #define VTCR_EL2 VTCR diff --git a/xen/include/asm-arm/percpu.h b/xen/include/asm-arm/percpu.h index ab27292..e955136 100644 --- a/xen/include/asm-arm/percpu.h +++ b/xen/include/asm-arm/percpu.h @@ -11,18 +11,17 @@ void percpu_init_areas(void); __section(".bss.percpu" #suffix) \ __typeof__(type) per_cpu_##name - #define per_cpu(var, cpu) \ (*RELOC_HIDE(&per_cpu__##var, __per_cpu_offset[cpu])) #define __get_cpu_var(var) \ - (*RELOC_HIDE(&per_cpu__##var, READ_CP32(HTPIDR))) + (*RELOC_HIDE(&per_cpu__##var, READ_SYSREG(TPIDR_EL2))) #define DECLARE_PER_CPU(type, name) extern __typeof__(type) per_cpu__##name DECLARE_PER_CPU(unsigned int, cpu_id); #define get_processor_id() (this_cpu(cpu_id)) #define set_processor_id(id) do { \ - WRITE_CP32(__per_cpu_offset[id], HTPIDR); \ + WRITE_SYSREG(__per_cpu_offset[id], TPIDR_EL2); \ this_cpu(cpu_id) = (id); \ } while(0) #endif -- 1.7.2.5
One side effect of this is that we now save the full 64-bit TTBR[0,1] even on a 32-bit hypervisor. This is needed anyway to support LPAE guests (although this patch doesn''t implement anything other than the context switch). Signed-off-by: Ian Campbell <ian.campbell@citrix.com> Acked-by: Tim Deegan <tim@xen.org> --- v2: Nuke XXX and rationalise naming: s/tpidrurw/tpidr_el0/ s/tpidrprw/tpidr_el1/ s/tpidruro/tpidrro_el0/ --- xen/arch/arm/domain.c | 113 +++++++++++++++++++++++++----------------- xen/arch/arm/traps.c | 14 +++--- xen/include/asm-arm/cpregs.h | 21 +++++++- xen/include/asm-arm/domain.h | 29 ++++++++--- 4 files changed, 115 insertions(+), 62 deletions(-) diff --git a/xen/arch/arm/domain.c b/xen/arch/arm/domain.c index f74caf4..e0707ff 100644 --- a/xen/arch/arm/domain.c +++ b/xen/arch/arm/domain.c @@ -43,55 +43,67 @@ void idle_loop(void) static void ctxt_switch_from(struct vcpu *p) { /* CP 15 */ - p->arch.csselr = READ_CP32(CSSELR); + p->arch.csselr = READ_SYSREG(CSSELR_EL1); /* Control Registers */ - p->arch.actlr = READ_CP32(ACTLR); - p->arch.sctlr = READ_CP32(SCTLR); - p->arch.cpacr = READ_CP32(CPACR); + p->arch.actlr = READ_SYSREG(ACTLR_EL1); + p->arch.sctlr = READ_SYSREG(SCTLR_EL1); + p->arch.cpacr = READ_SYSREG(CPACR_EL1); - p->arch.contextidr = READ_CP32(CONTEXTIDR); - p->arch.tpidrurw = READ_CP32(TPIDRURW); - p->arch.tpidruro = READ_CP32(TPIDRURO); - p->arch.tpidrprw = READ_CP32(TPIDRPRW); + p->arch.contextidr = READ_SYSREG(CONTEXTIDR_EL1); + p->arch.tpidr_el0 = READ_SYSREG(TPIDR_EL0); + p->arch.tpidrro_el0 = READ_SYSREG(TPIDRRO_EL0); + p->arch.tpidr_el1 = READ_SYSREG(TPIDR_EL1); /* Arch timer */ virt_timer_save(p); +#if defined(CONFIG_ARM_32) /* XXX only save these if ThumbEE e.g. 
ID_PFR0.THUMB_EE_SUPPORT */ p->arch.teecr = READ_CP32(TEECR); p->arch.teehbr = READ_CP32(TEEHBR); p->arch.joscr = READ_CP32(JOSCR); p->arch.jmcr = READ_CP32(JMCR); +#endif isb(); /* MMU */ - p->arch.vbar = READ_CP32(VBAR); - p->arch.ttbcr = READ_CP32(TTBCR); - /* XXX save 64 bit TTBR if guest is LPAE */ - p->arch.ttbr0 = READ_CP32(TTBR0); - p->arch.ttbr1 = READ_CP32(TTBR1); - - p->arch.dacr = READ_CP32(DACR); - p->arch.par = READ_CP64(PAR); + p->arch.vbar = READ_SYSREG(VBAR_EL1); + p->arch.ttbcr = READ_SYSREG(TCR_EL1); + p->arch.ttbr0 = READ_SYSREG64(TTBR0_EL1); + p->arch.ttbr1 = READ_SYSREG64(TTBR1_EL1); + if ( is_pv32_domain(p->domain) ) + p->arch.dacr = READ_SYSREG(DACR32_EL2); + p->arch.par = READ_SYSREG64(PAR_EL1); +#if defined(CONFIG_ARM_32) p->arch.mair0 = READ_CP32(MAIR0); p->arch.mair1 = READ_CP32(MAIR1); +#else + p->arch.mair = READ_SYSREG64(MAIR_EL1); +#endif /* Fault Status */ +#if defined(CONFIG_ARM_32) p->arch.dfar = READ_CP32(DFAR); p->arch.ifar = READ_CP32(IFAR); p->arch.dfsr = READ_CP32(DFSR); - p->arch.ifsr = READ_CP32(IFSR); - p->arch.adfsr = READ_CP32(ADFSR); - p->arch.aifsr = READ_CP32(AIFSR); +#elif defined(CONFIG_ARM_64) + p->arch.far = READ_SYSREG64(FAR_EL1); + p->arch.esr = READ_SYSREG64(ESR_EL1); +#endif + + if ( is_pv32_domain(p->domain) ) + p->arch.ifsr = READ_SYSREG(IFSR32_EL2); + p->arch.afsr0 = READ_SYSREG(AFSR0_EL1); + p->arch.afsr1 = READ_SYSREG(AFSR1_EL1); /* XXX MPU */ /* XXX VFP */ - /* XXX VGIC */ + /* VGIC */ gic_save_state(p); isb(); @@ -100,16 +112,16 @@ static void ctxt_switch_from(struct vcpu *p) static void ctxt_switch_to(struct vcpu *n) { - uint32_t hcr; + register_t hcr; - hcr = READ_CP32(HCR); - WRITE_CP32(hcr & ~HCR_VM, HCR); + hcr = READ_SYSREG(HCR_EL2); + WRITE_SYSREG(hcr & ~HCR_VM, HCR_EL2); isb(); p2m_load_VTTBR(n->domain); isb(); - /* XXX VGIC */ + /* VGIC */ gic_restore_state(n); /* XXX VFP */ @@ -117,51 +129,62 @@ static void ctxt_switch_to(struct vcpu *n) /* XXX MPU */ /* Fault Status */ +#if 
defined(CONFIG_ARM_32) WRITE_CP32(n->arch.dfar, DFAR); WRITE_CP32(n->arch.ifar, IFAR); WRITE_CP32(n->arch.dfsr, DFSR); - WRITE_CP32(n->arch.ifsr, IFSR); - WRITE_CP32(n->arch.adfsr, ADFSR); - WRITE_CP32(n->arch.aifsr, AIFSR); +#elif defined(CONFIG_ARM_64) + WRITE_SYSREG64(n->arch.far, FAR_EL1); + WRITE_SYSREG64(n->arch.esr, ESR_EL1); +#endif + + if ( is_pv32_domain(n->domain) ) + WRITE_SYSREG(n->arch.ifsr, IFSR32_EL2); + WRITE_SYSREG(n->arch.afsr0, AFSR0_EL1); + WRITE_SYSREG(n->arch.afsr1, AFSR1_EL1); /* MMU */ - WRITE_CP32(n->arch.vbar, VBAR); - WRITE_CP32(n->arch.ttbcr, TTBCR); - /* XXX restore 64 bit TTBR if guest is LPAE */ - WRITE_CP32(n->arch.ttbr0, TTBR0); - WRITE_CP32(n->arch.ttbr1, TTBR1); - - WRITE_CP32(n->arch.dacr, DACR); - WRITE_CP64(n->arch.par, PAR); + WRITE_SYSREG(n->arch.vbar, VBAR_EL1); + WRITE_SYSREG(n->arch.ttbcr, TCR_EL1); + WRITE_SYSREG64(n->arch.ttbr0, TTBR0_EL1); + WRITE_SYSREG64(n->arch.ttbr1, TTBR1_EL1); + if ( is_pv32_domain(n->domain) ) + WRITE_SYSREG(n->arch.dacr, DACR32_EL2); + WRITE_SYSREG64(n->arch.par, PAR_EL1); +#if defined(CONFIG_ARM_32) WRITE_CP32(n->arch.mair0, MAIR0); WRITE_CP32(n->arch.mair1, MAIR1); +#elif defined(CONFIG_ARM_64) + WRITE_SYSREG64(n->arch.mair, MAIR_EL1); +#endif isb(); /* Control Registers */ - WRITE_CP32(n->arch.actlr, ACTLR); - WRITE_CP32(n->arch.sctlr, SCTLR); - WRITE_CP32(n->arch.cpacr, CPACR); + WRITE_SYSREG(n->arch.actlr, ACTLR_EL1); + WRITE_SYSREG(n->arch.sctlr, SCTLR_EL1); + WRITE_SYSREG(n->arch.cpacr, CPACR_EL1); - WRITE_CP32(n->arch.contextidr, CONTEXTIDR); - WRITE_CP32(n->arch.tpidrurw, TPIDRURW); - WRITE_CP32(n->arch.tpidruro, TPIDRURO); - WRITE_CP32(n->arch.tpidrprw, TPIDRPRW); + WRITE_SYSREG(n->arch.contextidr, CONTEXTIDR_EL1); + WRITE_SYSREG(n->arch.tpidr_el0, TPIDR_EL0); + WRITE_SYSREG(n->arch.tpidrro_el0, TPIDRRO_EL0); + WRITE_SYSREG(n->arch.tpidr_el1, TPIDR_EL1); +#if defined(CONFIG_ARM_32) /* XXX only restore these if ThumbEE e.g. 
ID_PFR0.THUMB_EE_SUPPORT */ WRITE_CP32(n->arch.teecr, TEECR); WRITE_CP32(n->arch.teehbr, TEEHBR); WRITE_CP32(n->arch.joscr, JOSCR); WRITE_CP32(n->arch.jmcr, JMCR); - +#endif isb(); /* CP 15 */ - WRITE_CP32(n->arch.csselr, CSSELR); + WRITE_SYSREG(n->arch.csselr, CSSELR_EL1); isb(); - WRITE_CP32(hcr, HCR); + WRITE_SYSREG(hcr, HCR_EL2); isb(); /* This is could trigger an hardware interrupt from the virtual diff --git a/xen/arch/arm/traps.c b/xen/arch/arm/traps.c index d6bdaa7..97a29fb 100644 --- a/xen/arch/arm/traps.c +++ b/xen/arch/arm/traps.c @@ -214,8 +214,8 @@ void panic_PAR(uint64_t par) } struct reg_ctxt { - uint32_t sctlr; - uint32_t ttbr0, ttbr1, ttbcr; + uint32_t sctlr, ttbcr; + uint64_t ttbr0, ttbr1; }; static void _show_registers(struct cpu_user_regs *regs, struct reg_ctxt *ctxt, @@ -265,7 +265,7 @@ static void _show_registers(struct cpu_user_regs *regs, printk("FIQ: R8: %08"PRIx32" R9: %08"PRIx32" R10:%08"PRIx32" R11:%08"PRIx32" R12:%08"PRIx32"\n", regs->r8_fiq, regs->r9_fiq, regs->r10_fiq, regs->r11_fiq, regs->r11_fiq); printk("\n"); - printk("TTBR0 %08"PRIx32" TTBR1 %08"PRIx32" TTBCR %08"PRIx32"\n", + printk("TTBR0 %010"PRIx64" TTBR1 %010"PRIx64" TTBCR %08"PRIx32"\n", ctxt->ttbr0, ctxt->ttbr1, ctxt->ttbcr); printk("SCTLR %08"PRIx32"\n", ctxt->sctlr); printk("VTTBR %010"PRIx64"\n", READ_CP64(VTTBR)); @@ -295,8 +295,8 @@ void show_registers(struct cpu_user_regs *regs) struct reg_ctxt ctxt; ctxt.sctlr = READ_CP32(SCTLR); ctxt.ttbcr = READ_CP32(TTBCR); - ctxt.ttbr0 = READ_CP32(TTBR0); - ctxt.ttbr1 = READ_CP32(TTBR1); + ctxt.ttbr0 = READ_CP64(TTBR0); + ctxt.ttbr1 = READ_CP64(TTBR1); _show_registers(regs, &ctxt, guest_mode(regs)); } @@ -631,14 +631,14 @@ static void do_cp15_64(struct cpu_user_regs *regs, void dump_guest_s1_walk(struct domain *d, vaddr_t addr) { uint32_t ttbcr = READ_CP32(TTBCR); - uint32_t ttbr0 = READ_CP32(TTBR0); + uint64_t ttbr0 = READ_CP64(TTBR0); paddr_t paddr; uint32_t offset; uint32_t *first = NULL, *second = NULL; printk("dom%d VA 
0x%08"PRIvaddr"\n", d->domain_id, addr); printk(" TTBCR: 0x%08"PRIx32"\n", ttbcr); - printk(" TTBR0: 0x%08"PRIx32" = 0x%"PRIpaddr"\n", + printk(" TTBR0: 0x%010"PRIx64" = 0x%"PRIpaddr"\n", ttbr0, p2m_lookup(d, ttbr0 & PAGE_MASK)); if ( ttbcr & TTBCR_EAE ) diff --git a/xen/include/asm-arm/cpregs.h b/xen/include/asm-arm/cpregs.h index dc69a06..732f967 100644 --- a/xen/include/asm-arm/cpregs.h +++ b/xen/include/asm-arm/cpregs.h @@ -106,9 +106,9 @@ #define HCR p15,4,c1,c1,0 /* Hyp. Configuration Register */ /* CP15 CR2: Translation Table Base and Control Registers */ -#define TTBR0 p15,0,c2,c0,0 /* Translation Table Base Reg. 0 */ -#define TTBR1 p15,0,c2,c0,1 /* Translation Table Base Reg. 1 */ #define TTBCR p15,0,c2,c0,2 /* Translatation Table Base Control Register */ +#define TTBR0 p15,0,c2 /* Translation Table Base Reg. 0 */ +#define TTBR1 p15,1,c2 /* Translation Table Base Reg. 1 */ #define HTTBR p15,4,c2 /* Hyp. Translation Table Base Register */ #define HTCR p15,4,c2,c0,2 /* Hyp. Translation Control Register */ #define VTCR p15,4,c2,c1,2 /* Virtualization Translation Control Register */ @@ -225,10 +225,17 @@ /* Aliases of AArch64 names for use in common code when building for AArch32 */ #ifdef CONFIG_ARM_32 /* Alphabetically... 
*/ +#define ACTLR_EL1 ACTLR +#define AFSR0_EL1 ADFSR +#define AFSR1_EL1 AIFSR #define CCSIDR_EL1 CCSIDR #define CLIDR_EL1 CLIDR +#define CONTEXTIDR_EL1 CONTEXTIDR +#define CPACR_EL1 CPACR #define CSSELR_EL1 CSSELR +#define DACR32_EL2 DACR #define ESR_EL2 HSR +#define HCR_EL2 HCR #define ID_AFR0_EL1 ID_AFR0 #define ID_DFR0_EL1 ID_DFR0 #define ID_ISAR0_EL1 ID_ISAR0 @@ -243,9 +250,19 @@ #define ID_MMFR3_EL1 ID_MMFR3 #define ID_PFR0_EL1 ID_PFR0 #define ID_PFR1_EL1 ID_PFR1 +#define IFSR32_EL2 IFSR +#define PAR_EL1 PAR +#define SCTLR_EL1 SCTLR #define SCTLR_EL2 HSCTLR +#define TCR_EL1 TTBCR +#define TPIDRRO_EL0 TPIDRURO +#define TPIDR_EL0 TPIDRURW +#define TPIDR_EL1 TPIDRPRW #define TPIDR_EL2 HTPIDR +#define TTBR0_EL1 TTBR0 #define TTBR0_EL2 HTTBR +#define TTBR1_EL1 TTBR1 +#define VBAR_EL1 VBAR #define VBAR_EL2 HVBAR #define VTCR_EL2 VTCR diff --git a/xen/include/asm-arm/domain.h b/xen/include/asm-arm/domain.h index ff6214b..4a4bf2f 100644 --- a/xen/include/asm-arm/domain.h +++ b/xen/include/asm-arm/domain.h @@ -133,30 +133,43 @@ struct arch_vcpu struct cpu_info *cpu_info; /* Fault Status */ +#ifdef CONFIG_ARM_32 + uint32_t dfsr; uint32_t dfar, ifar; - uint32_t dfsr, ifsr; - uint32_t adfsr, aifsr; +#else + uint64_t far; + uint32_t esr; +#endif + + uint32_t ifsr; /* 32-bit guests only */ + uint32_t afsr0, afsr1; /* MMU */ - uint32_t vbar; + register_t vbar; uint32_t ttbcr; - uint32_t ttbr0, ttbr1; + uint64_t ttbr0, ttbr1; - uint32_t dacr; + uint32_t dacr; /* 32-bit guests only */ uint64_t par; +#ifdef CONFIG_ARM_32 uint32_t mair0, mair1; +#else + uint64_t mair; +#endif /* Control Registers */ uint32_t actlr, sctlr; uint32_t cpacr; uint32_t contextidr; - uint32_t tpidrurw; - uint32_t tpidruro; - uint32_t tpidrprw; + register_t tpidr_el0; + register_t tpidr_el1; + register_t tpidrro_el0; +#ifdef CONFIG_ARM_32 uint32_t teecr, teehbr; uint32_t joscr, jmcr; +#endif /* CP 15 */ uint32_t csselr; -- 1.7.2.5
Ian Campbell
2013-Feb-14 16:47 UTC
[PATCH V2 31/46] xen: arm: show_registers() support for 64-bit.
Signed-off-by: Ian Campbell <ian.campbell@citrix.com> --- xen/arch/arm/traps.c | 176 +++++++++++++++++++++++++++++++++++++++++++------- 1 files changed, 151 insertions(+), 25 deletions(-) diff --git a/xen/arch/arm/traps.c b/xen/arch/arm/traps.c index 97a29fb..642b0ea 100644 --- a/xen/arch/arm/traps.c +++ b/xen/arch/arm/traps.c @@ -214,12 +214,19 @@ void panic_PAR(uint64_t par) } struct reg_ctxt { - uint32_t sctlr, ttbcr; + uint32_t sctlr, tcr; uint64_t ttbr0, ttbr1; +#ifdef CONFIG_ARM_32 + uint32_t dfar, ifar; +#else + uint64_t far; +#endif }; -static void _show_registers(struct cpu_user_regs *regs, - struct reg_ctxt *ctxt, - int guest_mode) + +static void show_registers_32(struct cpu_user_regs *regs, + struct reg_ctxt *ctxt, + int guest_mode, + const struct vcpu *v) { static const char *mode_strings[] = { [PSR_MODE_USR] = "USR", @@ -233,25 +240,34 @@ static void _show_registers(struct cpu_user_regs *regs, [PSR_MODE_SYS] = "SYS" }; - print_xen_info(); - printk("CPU: %d\n", smp_processor_id()); +#ifdef CONFIG_ARM_64 + printk("PC: %08"PRIx32"\n", regs->pc32); +#else printk("PC: %08"PRIx32, regs->pc); if ( !guest_mode ) - print_symbol(" %s", regs->pc); + print_symbol(" %s", regs->pc); printk("\n"); - printk("CPSR: %08"PRIx32" MODE:%s\n", regs->cpsr, - mode_strings[regs->cpsr & PSR_MODE_MASK]); +#endif + printk("CPSR: %08"PRIx32" MODE:%s%s\n", regs->cpsr, + guest_mode ? "32-bit Guest " : "Hypervisor", + guest_mode ? 
mode_strings[regs->cpsr & PSR_MODE_MASK] : ""); printk(" R0: %08"PRIx32" R1: %08"PRIx32" R2: %08"PRIx32" R3: %08"PRIx32"\n", regs->r0, regs->r1, regs->r2, regs->r3); printk(" R4: %08"PRIx32" R5: %08"PRIx32" R6: %08"PRIx32" R7: %08"PRIx32"\n", regs->r4, regs->r5, regs->r6, regs->r7); printk(" R8: %08"PRIx32" R9: %08"PRIx32" R10:%08"PRIx32" R11:%08"PRIx32" R12:%08"PRIx32"\n", - regs->r8, regs->r9, regs->r10, regs->r11, regs->r12); + regs->r8, regs->r9, regs->r10, +#ifdef CONFIG_ARM_64 + regs->r11, +#else + regs->fp, +#endif + regs->r12); if ( guest_mode ) { - printk("USR: SP: %08"PRIx32" LR: %08"PRIx32" CPSR:%08"PRIx32"\n", - regs->sp_usr, regs->lr_usr, regs->cpsr); + printk("USR: SP: %08"PRIx32" LR: %08"PRIregister"\n", + regs->sp_usr, regs->lr); printk("SVC: SP: %08"PRIx32" LR: %08"PRIx32" SPSR:%08"PRIx32"\n", regs->sp_svc, regs->lr_svc, regs->spsr_svc); printk("ABT: SP: %08"PRIx32" LR: %08"PRIx32" SPSR:%08"PRIx32"\n", @@ -264,50 +280,160 @@ static void _show_registers(struct cpu_user_regs *regs, regs->sp_fiq, regs->lr_fiq, regs->spsr_fiq); printk("FIQ: R8: %08"PRIx32" R9: %08"PRIx32" R10:%08"PRIx32" R11:%08"PRIx32" R12:%08"PRIx32"\n", regs->r8_fiq, regs->r9_fiq, regs->r10_fiq, regs->r11_fiq, regs->r11_fiq); - printk("\n"); - printk("TTBR0 %010"PRIx64" TTBR1 %010"PRIx64" TTBCR %08"PRIx32"\n", - ctxt->ttbr0, ctxt->ttbr1, ctxt->ttbcr); + } +#ifndef CONFIG_ARM_64 + else + { + printk("HYP: SP: %08"PRIx32" LR: %08"PRIregister"\n", regs->sp, regs->lr); + } +#endif + printk("\n"); + + if ( guest_mode ) + { + printk("TTBR0 %010"PRIx64" TTBR1 %010"PRIx64" TCR %08"PRIx32"\n", + ctxt->ttbr0, ctxt->ttbr1, ctxt->tcr); printk("SCTLR %08"PRIx32"\n", ctxt->sctlr); - printk("VTTBR %010"PRIx64"\n", READ_CP64(VTTBR)); + printk("IFAR %08"PRIx32" DFAR %08"PRIx32"\n", +#ifdef CONFIG_ARM_64 + (uint32_t)(ctxt->far >> 32), + (uint32_t)(ctxt->far & 0xffffffff) +#else + ctxt->ifar, ctxt->dfar +#endif + ); printk("\n"); } - else +} + +#ifdef CONFIG_ARM_64 +static void show_registers_64(struct 
cpu_user_regs *regs, + struct reg_ctxt *ctxt, + int guest_mode, + const struct vcpu *v) +{ + printk("PC: %016"PRIx64, regs->pc); + if ( !guest_mode ) + print_symbol(" %s", regs->pc); + printk("\n"); + printk("SP: %08"PRIx64"\n", regs->sp); + printk("CPSR: %08"PRIx32" MODE:%s\n", regs->cpsr, + guest_mode ? "64-bit Guest" : "Hypervisor"); + printk(" X0: %016"PRIx64" X1: %016"PRIx64" X2: %016"PRIx64"\n", + regs->x0, regs->x1, regs->x2); + printk(" X3: %016"PRIx64" X4: %016"PRIx64" X5: %016"PRIx64"\n", + regs->x3, regs->x4, regs->x5); + printk(" X6: %016"PRIx64" X7: %016"PRIx64" X8: %016"PRIx64"\n", + regs->x6, regs->x7, regs->x8); + printk(" X9: %016"PRIx64" X10: %016"PRIx64" X11: %016"PRIx64"\n", + regs->x9, regs->x10, regs->x11); + printk(" X12: %016"PRIx64" X13: %016"PRIx64" X14: %016"PRIx64"\n", + regs->x12, regs->x13, regs->x14); + printk(" X15: %016"PRIx64" X16: %016"PRIx64" X17: %016"PRIx64"\n", + regs->x15, regs->x16, regs->x17); + printk(" X18: %016"PRIx64" X19: %016"PRIx64" X20: %016"PRIx64"\n", + regs->x18, regs->x19, regs->x20); + printk(" X21: %016"PRIx64" X22: %016"PRIx64" X23: %016"PRIx64"\n", + regs->x21, regs->x22, regs->x23); + printk(" X24: %016"PRIx64" X25: %016"PRIx64" X26: %016"PRIx64"\n", + regs->x24, regs->x25, regs->x26); + printk(" X27: %016"PRIx64" X28: %016"PRIx64" X29: %016"PRIx64"\n", + regs->x27, regs->x28, regs->lr); + printk("\n"); + + if ( guest_mode ) { - printk(" SP: %08"PRIx32" LR: %08"PRIx32"\n", regs->sp, regs->lr); + printk("SCTLR_EL1: %08"PRIx32"\n", ctxt->sctlr); + printk(" TCR_EL1: %08"PRIx32"\n", ctxt->tcr); + printk("TTBR0_EL1: %010"PRIx64"\n", ctxt->ttbr0); + printk("TTBR1_EL1: %010"PRIx64"\n", ctxt->ttbr1); + printk(" FAR_EL1: %010"PRIx64"\n", ctxt->far); printk("\n"); } +} +#endif + +static void _show_registers(struct cpu_user_regs *regs, + struct reg_ctxt *ctxt, + int guest_mode, + const struct vcpu *v) +{ + print_xen_info(); + + printk("CPU: %d\n", smp_processor_id()); + + if ( guest_mode ) + { + if ( 
is_pv32_domain(v->domain) ) + show_registers_32(regs, ctxt, guest_mode, v); +#ifdef CONFIG_ARM_64 + else if ( is_pv64_domain(v->domain) ) + show_registers_64(regs, ctxt, guest_mode, v); +#endif + } + else + { +#ifdef CONFIG_ARM_64 + show_registers_64(regs, ctxt, guest_mode, v); +#else + show_registers_32(regs, ctxt, guest_mode, v); +#endif + } +#ifdef CONFIG_ARM_32 printk("HTTBR %"PRIx64"\n", READ_CP64(HTTBR)); printk("HDFAR %"PRIx32"\n", READ_CP32(HDFAR)); printk("HIFAR %"PRIx32"\n", READ_CP32(HIFAR)); printk("HPFAR %"PRIx32"\n", READ_CP32(HPFAR)); printk("HCR %08"PRIx32"\n", READ_CP32(HCR)); printk("HSR %"PRIx32"\n", READ_CP32(HSR)); + printk("VTTBR %010"PRIx64"\n", READ_CP64(VTTBR)); printk("\n"); printk("DFSR %"PRIx32" DFAR %"PRIx32"\n", READ_CP32(DFSR), READ_CP32(DFAR)); printk("IFSR %"PRIx32" IFAR %"PRIx32"\n", READ_CP32(IFSR), READ_CP32(IFAR)); printk("\n"); +#else + printk("TTBR0_EL2: %"PRIx64"\n", READ_SYSREG64(TTBR0_EL2)); + printk(" FAR_EL2: %"PRIx64"\n", READ_SYSREG64(FAR_EL2)); + printk("HPFAR_EL2: %"PRIx64"\n", READ_SYSREG64(HPFAR_EL2)); + printk(" HCR_EL2: %"PRIx64"\n", READ_SYSREG64(HCR_EL2)); + printk(" ESR_EL2: %"PRIx64"\n", READ_SYSREG64(ESR_EL2)); + printk("VTTBR_EL2: %"PRIx64"\n", READ_SYSREG64(VTTBR_EL2)); + printk("\n"); +#endif } void show_registers(struct cpu_user_regs *regs) { struct reg_ctxt ctxt; - ctxt.sctlr = READ_CP32(SCTLR); - ctxt.ttbcr = READ_CP32(TTBCR); - ctxt.ttbr0 = READ_CP64(TTBR0); - ctxt.ttbr1 = READ_CP64(TTBR1); - _show_registers(regs, &ctxt, guest_mode(regs)); + ctxt.sctlr = READ_SYSREG(SCTLR_EL1); + ctxt.tcr = READ_SYSREG(TCR_EL1); + ctxt.ttbr0 = READ_SYSREG64(TTBR0_EL1); + ctxt.ttbr1 = READ_SYSREG64(TTBR1_EL1); +#ifdef CONFIG_ARM_32 + ctxt.dfar = READ_CP32(DFAR); + ctxt.ifar = READ_CP32(IFAR); +#else + ctxt.far = READ_SYSREG(FAR_EL1); +#endif + _show_registers(regs, &ctxt, guest_mode(regs), current); } void vcpu_show_registers(const struct vcpu *v) { struct reg_ctxt ctxt; ctxt.sctlr = v->arch.sctlr; - ctxt.ttbcr = 
v->arch.ttbcr; + ctxt.tcr = v->arch.ttbcr; ctxt.ttbr0 = v->arch.ttbr0; ctxt.ttbr1 = v->arch.ttbr1; - _show_registers(&v->arch.cpu_info->guest_cpu_user_regs, &ctxt, 1); +#ifdef CONFIG_ARM_32 + ctxt.dfar = v->arch.dfar; + ctxt.ifar = v->arch.ifar; +#else + ctxt.far = v->arch.far; +#endif + _show_registers(&v->arch.cpu_info->guest_cpu_user_regs, &ctxt, 1, v); } static void show_guest_stack(struct cpu_user_regs *regs) -- 1.7.2.5
This is v2 of the arm64 bit series. It is based on current staging plus the "xen: public interface (and foreign check) changes for arm" series, second posting of that is <1360857557.20449.436.camel@zakaz.uk.xensource.com> I have implemented Tim's review comments with the exception of the comments on use of WFE etc in the spinlock implementation (#8 last time) and the comments on trap handling (#28 last time) which I intend to return to. Many thanks to Tim for his copious comments and acks! I have pushed the series, plus the prerequisite mentioned above and a small number of Stefano's toolstack patches to: git://xenbits.xen.org/people/ianc/xen.git arm64-v2 As well as the above I have pushed the kernel tree I am using, which is based on v3.8-rc3 to: git://xenbits.xen.org/people/ianc/linux.git arm64-v2 The kernel config is attached. Note that this is a 32-bit ARM kernel, 64-bit support for dom0 and domU is a WIP but not included here. I am building the 64-bit hypervisor with the Linaro gcc, gcc-linaro-aarch64-linux-gnu-4.7-2012.12-20121214_linux, from http://www.linaro.org/engineering/armv8#tab3 http://releases.linaro.org/13.01/components/toolchain/binaries/gcc-linaro-aarch64-linux-gnu-4.7-2013.01-20130125_linux.tar.bz2 For the tools I am using the native armhf tools on a Debian Wheezy armhf system (running on a cluster of IMX.5x loco boards). I have not tried cross compiling the tools. FWIW I also build the 32-bit hypervisor natively in this environment. For the kernel I am using the kernel.org cross compiler, gcc-4.6.3-nolibc / arm-unknown-linux-gnueabi, from http://www.kernel.org/pub/tools/crosstool/files/bin/x86_64/4.6.3 With all this I can boot a 32-bit dom0 and a 32-bit guest domain (using the same kernel) on either a 64-bit hypervisor (on the RTSM_VE_AEMv8A model, 0.8.4510) or a 32-bit hypervisor (RTSM_VE_Cortex-A15x1, 8.0.44). 
You can also run 32-bit on the V8 model (using -C cluster.cpu0.CONFIG64=0) if you comment out the ThumbEE in ctxt_switch_from and ctxt_switch_to (making this dynamic is on my TODO list). My dom0 root filesystem is a Debian Wheezy armhf image, attached to the emulated MMC (-C motherboard.mmc.p_mmc_file=rootfs.img) To save running all of the dom0 initscripts (which is a bit boring on the model) I boot using init=/root/init.sh (init.sh is attached), which mounts the necessary filesystems, starts u-boot and the relevant xen stuff. Once booted into dom0 I run the attached guest.sh, which uses the attached cfg and a guest.img (I use the one from the ARM 3rd party IP) to start a guest and connect to its console The guest boots to a prompt. I will at some point be updating http://wiki.xen.org/wiki/Xen_ARMv7_with_Virtualization_Extensions with v8 specific info (and renaming the page s/v7//). Ian. _______________________________________________ Xen-devel mailing list Xen-devel@lists.xen.org http://lists.xen.org/xen-devel
Ian Campbell
2013-Feb-14 16:47 UTC
[PATCH V2 32/46] xen: arm: make dom0 builder work on 64-bit hypervisor
This still only builds a 32-bit dom0, although it lays a bit of simple ground work for 64-bit dom0. Signed-off-by: Ian Campbell <ian.campbell@citrix.com> Acked-by: Tim Deegan <tim@xen.org> --- xen/arch/arm/domain_build.c | 53 ++++++++++++++++++++++++++++-------------- 1 files changed, 35 insertions(+), 18 deletions(-) diff --git a/xen/arch/arm/domain_build.c b/xen/arch/arm/domain_build.c index 30d014a..29cef73 100644 --- a/xen/arch/arm/domain_build.c +++ b/xen/arch/arm/domain_build.c @@ -68,7 +68,7 @@ static int set_memory_reg(struct domain *d, struct kernel_info *kinfo, size = kinfo->unassigned_mem; device_tree_set_reg(&new_cell, address_cells, size_cells, start, size); - printk("Populate P2M %#llx->%#llx\n", start, start + size); + printk("Populate P2M %#"PRIx64"->%#"PRIx64"\n", start, start + size); p2m_populate_ram(d, start, start + size); kinfo->mem.bank[kinfo->mem.nr_banks].start = start; kinfo->mem.bank[kinfo->mem.nr_banks].size = size; @@ -268,7 +268,7 @@ static int prepare_dtb(struct domain *d, struct kernel_info *kinfo) static void dtb_load(struct kernel_info *kinfo) { - void * __user dtb_virt = (void *)(register_t)kinfo->dtb_paddr; + void * __user dtb_virt = (void * __user)(register_t)kinfo->dtb_paddr; raw_copy_to_guest(dtb_virt, kinfo->fdt, fdt_totalsize(kinfo->fdt)); xfree(kinfo->fdt); @@ -319,7 +319,8 @@ int construct_dom0(struct domain *d) gic_route_irq_to_guest(d, 47, "eth"); /* Enable second stage translation */ - WRITE_CP32(READ_CP32(HCR) | HCR_VM, HCR); isb(); + WRITE_SYSREG(READ_SYSREG(HCR_EL2) | HCR_VM, HCR_EL2); + isb(); /* The following loads use the domain''s p2m */ p2m_load_VTTBR(d); @@ -337,24 +338,40 @@ int construct_dom0(struct domain *d) regs->cpsr = PSR_ABT_MASK|PSR_FIQ_MASK|PSR_IRQ_MASK|PSR_MODE_SVC; -/* FROM LINUX head.S - - * Kernel startup entry point. - * --------------------------- - * - * This is normally called from the decompressor code. 
The requirements - * are: MMU = off, D-cache = off, I-cache = dont care, r0 = 0, - * r1 = machine nr, r2 = atags or dtb pointer. - *... - */ +#ifdef CONFIG_ARM_64 + d->arch.type = kinfo.type; +#endif - regs->r0 = 0; /* SBZ */ - regs->r1 = 0xffffffff; /* We use DTB therefore no machine id */ - regs->r2 = kinfo.dtb_paddr; + if ( is_pv32_domain(d) ) + { + /* FROM LINUX head.S + * + * Kernel startup entry point. + * --------------------------- + * + * This is normally called from the decompressor code. The requirements + * are: MMU = off, D-cache = off, I-cache = dont care, r0 = 0, + * r1 = machine nr, r2 = atags or dtb pointer. + *... + */ + regs->r0 = 0; /* SBZ */ + regs->r1 = 0xffffffff; /* We use DTB therefore no machine id */ + regs->r2 = kinfo.dtb_paddr; + } +#ifdef CONFIG_ARM_64 + else + { + /* From linux/Documentation/arm64/booting.txt */ + regs->x0 = kinfo.dtb_paddr; + regs->x1 = 0; /* Reserved for future use */ + regs->x2 = 0; /* Reserved for future use */ + regs->x3 = 0; /* Reserved for future use */ + } +#endif - WRITE_CP32(SCTLR_BASE, SCTLR); + v->arch.sctlr = SCTLR_BASE; - WRITE_CP32(HCR_PTW|HCR_BSU_OUTER|HCR_AMO|HCR_IMO|HCR_VM, HCR); + WRITE_SYSREG(HCR_PTW|HCR_BSU_OUTER|HCR_AMO|HCR_IMO|HCR_VM, HCR_EL2); isb(); local_abort_enable(); -- 1.7.2.5
Ian Campbell
2013-Feb-14 16:47 UTC
[PATCH V2 33/46] xen: arm: gic: use 64-bit compatible registers
Signed-off-by: Ian Campbell <ian.campbell@citrix.com> Acked-by: Tim Deegan <tim@xen.org> --- xen/arch/arm/gic.c | 12 +++++------- 1 files changed, 5 insertions(+), 7 deletions(-) diff --git a/xen/arch/arm/gic.c b/xen/arch/arm/gic.c index 7627ad8..e1af33a 100644 --- a/xen/arch/arm/gic.c +++ b/xen/arch/arm/gic.c @@ -267,7 +267,7 @@ static void __init gic_dist_init(void) /* Disable all global interrupts */ for ( i = 32; i < gic.lines; i += 32 ) - GICD[GICD_ICENABLER + i / 32] = ~0ul; + GICD[GICD_ICENABLER + i / 32] = (uint32_t)~0ul; /* Turn on the distributor */ GICD[GICD_CTLR] = GICD_CTL_ENABLE; @@ -531,18 +531,16 @@ static void gic_restore_pending_irqs(struct vcpu *v) static void gic_inject_irq_start(void) { - uint32_t hcr; - hcr = READ_CP32(HCR); - WRITE_CP32(hcr | HCR_VI, HCR); + register_t hcr = READ_SYSREG(HCR_EL2); + WRITE_SYSREG(hcr | HCR_VI, HCR_EL2); isb(); } static void gic_inject_irq_stop(void) { - uint32_t hcr; - hcr = READ_CP32(HCR); + register_t hcr = READ_SYSREG(HCR_EL2); if (hcr & HCR_VI) { - WRITE_CP32(hcr & ~HCR_VI, HCR); + WRITE_SYSREG(hcr & ~HCR_VI, HCR_EL2); isb(); } } -- 1.7.2.5
Ian Campbell
2013-Feb-14 16:47 UTC
[PATCH V2 34/46] xen: arm: time: use 64-bit compatible registers
Signed-off-by: Ian Campbell <ian.campbell@citrix.com> Acked-by: Tim Deegan <tim@xen.org> --- xen/arch/arm/time.c | 48 +++++++++++++++++++++-------------------- xen/include/asm-arm/cpregs.h | 12 ++++++++++ 2 files changed, 37 insertions(+), 23 deletions(-) diff --git a/xen/arch/arm/time.c b/xen/arch/arm/time.c index 3dad9b3..ee92d8c 100644 --- a/xen/arch/arm/time.c +++ b/xen/arch/arm/time.c @@ -76,9 +76,9 @@ static uint32_t calibrate_timer(void) sec = rtc[0] + 1; do {} while ( rtc[0] != sec ); // Now time a few seconds - start = READ_CP64(CNTPCT); + start = READ_SYSREG64(CNTPCT_EL0); do {} while ( rtc[0] < sec + 32 ); - end = READ_CP64(CNTPCT); + end = READ_SYSREG64(CNTPCT_EL0); printk("done.\n"); clear_fixmap(FIXMAP_MISC); @@ -90,11 +90,13 @@ static uint32_t calibrate_timer(void) int __init init_xen_time(void) { /* Check that this CPU supports the Generic Timer interface */ +#if defined(CONFIG_ARM_32) if ( (READ_CP32(ID_PFR1) & ID_PFR1_GT_MASK) != ID_PFR1_GT_v1 ) panic("CPU does not support the Generic Timer v1 interface.\n"); +#endif - cpu_khz = READ_CP32(CNTFRQ) / 1000; - boot_count = READ_CP64(CNTPCT); + cpu_khz = READ_SYSREG32(CNTFRQ_EL0) / 1000; + boot_count = READ_SYSREG64(CNTPCT_EL0); printk("Using generic timer at %lu KHz\n", cpu_khz); return 0; @@ -103,7 +105,7 @@ int __init init_xen_time(void) /* Return number of nanoseconds since boot */ s_time_t get_s_time(void) { - uint64_t ticks = READ_CP64(CNTPCT) - boot_count; + uint64_t ticks = READ_SYSREG64(CNTPCT_EL0) - boot_count; return ticks_to_ns(ticks); } @@ -117,20 +119,20 @@ int reprogram_timer(s_time_t timeout) if ( timeout == 0 ) { #if USE_HYP_TIMER - WRITE_CP32(0, CNTHP_CTL); + WRITE_SYSREG32(0, CNTHP_CTL_EL2); #else - WRITE_CP32(0, CNTP_CTL); + WRITE_SYSREG32(0, CNTP_CTL_EL0); #endif return 1; } deadline = ns_to_ticks(timeout) + boot_count; #if USE_HYP_TIMER - WRITE_CP64(deadline, CNTHP_CVAL); - WRITE_CP32(CNTx_CTL_ENABLE, CNTHP_CTL); + WRITE_SYSREG64(deadline, CNTHP_CVAL_EL2); + 
WRITE_SYSREG32(CNTx_CTL_ENABLE, CNTHP_CTL_EL2); #else - WRITE_CP64(deadline, CNTP_CVAL); - WRITE_CP32(CNTx_CTL_ENABLE, CNTP_CTL); + WRITE_SYSREG64(deadline, CNTP_CVAL_EL0); + WRITE_SYSREG32(CNTx_CTL_ENABLE, CNTP_CTL_EL0); #endif isb(); @@ -142,27 +144,27 @@ int reprogram_timer(s_time_t timeout) /* Handle the firing timer */ static void timer_interrupt(int irq, void *dev_id, struct cpu_user_regs *regs) { - if ( irq == 26 && READ_CP32(CNTHP_CTL) & CNTx_CTL_PENDING ) + if ( irq == 26 && READ_SYSREG32(CNTHP_CTL_EL2) & CNTx_CTL_PENDING ) { /* Signal the generic timer code to do its work */ raise_softirq(TIMER_SOFTIRQ); /* Disable the timer to avoid more interrupts */ - WRITE_CP32(0, CNTHP_CTL); + WRITE_SYSREG32(0, CNTHP_CTL_EL2); } - if (irq == 30 && READ_CP32(CNTP_CTL) & CNTx_CTL_PENDING ) + if (irq == 30 && READ_SYSREG32(CNTP_CTL_EL0) & CNTx_CTL_PENDING ) { /* Signal the generic timer code to do its work */ raise_softirq(TIMER_SOFTIRQ); /* Disable the timer to avoid more interrupts */ - WRITE_CP32(0, CNTP_CTL); + WRITE_SYSREG32(0, CNTP_CTL_EL0); } } static void vtimer_interrupt(int irq, void *dev_id, struct cpu_user_regs *regs) { - current->arch.virt_timer.ctl = READ_CP32(CNTV_CTL); - WRITE_CP32(current->arch.virt_timer.ctl | CNTx_CTL_MASK, CNTV_CTL); + current->arch.virt_timer.ctl = READ_SYSREG32(CNTV_CTL_EL0); + WRITE_SYSREG32(current->arch.virt_timer.ctl | CNTx_CTL_MASK, CNTV_CTL_EL0); vgic_vcpu_inject_irq(current, irq, 1); } @@ -170,17 +172,17 @@ static void vtimer_interrupt(int irq, void *dev_id, struct cpu_user_regs *regs) void __cpuinit init_timer_interrupt(void) { /* Sensible defaults */ - WRITE_CP64(0, CNTVOFF); /* No VM-specific offset */ - WRITE_CP32(0, CNTKCTL); /* No user-mode access */ + WRITE_SYSREG64(0, CNTVOFF_EL2); /* No VM-specific offset */ + WRITE_SYSREG32(0, CNTKCTL_EL1); /* No user-mode access */ #if USE_HYP_TIMER /* Do not let the VMs program the physical timer, only read the physical counter */ - WRITE_CP32(CNTHCTL_PA, CNTHCTL); + 
WRITE_SYSREG32(CNTHCTL_PA, CNTHCTL_EL2); #else /* Cannot let VMs access physical counter if we are using it */ - WRITE_CP32(0, CNTHCTL); + WRITE_SYSREG32(0, CNTHCTL_EL2); #endif - WRITE_CP32(0, CNTP_CTL); /* Physical timer disabled */ - WRITE_CP32(0, CNTHP_CTL); /* Hypervisor''s timer disabled */ + WRITE_SYSREG32(0, CNTP_CTL_EL0); /* Physical timer disabled */ + WRITE_SYSREG32(0, CNTHP_CTL_EL2); /* Hypervisor''s timer disabled */ isb(); /* XXX Need to find this IRQ number from devicetree? */ diff --git a/xen/include/asm-arm/cpregs.h b/xen/include/asm-arm/cpregs.h index 732f967..a374f5c 100644 --- a/xen/include/asm-arm/cpregs.h +++ b/xen/include/asm-arm/cpregs.h @@ -230,6 +230,18 @@ #define AFSR1_EL1 AIFSR #define CCSIDR_EL1 CCSIDR #define CLIDR_EL1 CLIDR +#define CNTFRQ_EL0 CNTFRQ +#define CNTHCTL_EL2 CNTHCTL +#define CNTHP_CTL_EL2 CNTHP_CTL +#define CNTHP_CVAL_EL2 CNTHP_CVAL +#define CNTKCTL_EL1 CNTKCTL +#define CNTPCT_EL0 CNTPCT +#define CNTP_CTL_EL0 CNTP_CTL +#define CNTP_CVAL_EL0 CNTP_CVAL +#define CNTVCT_EL0 CNTVCT +#define CNTVOFF_EL2 CNTVOFF +#define CNTV_CTL_EL0 CNTV_CTL +#define CNTV_CVAL_EL0 CNTV_CVAL #define CONTEXTIDR_EL1 CONTEXTIDR #define CPACR_EL1 CPACR #define CSSELR_EL1 CSSELR -- 1.7.2.5
Ian Campbell
2013-Feb-14 16:47 UTC
[PATCH V2 35/46] xen: arm: p2m: use 64-bit compatible registers.
Signed-off-by: Ian Campbell <ian.campbell@citrix.com> Acked-by: Tim Deegan <tim@xen.org> --- xen/arch/arm/p2m.c | 2 +- xen/include/asm-arm/cpregs.h | 1 + 2 files changed, 2 insertions(+), 1 deletions(-) diff --git a/xen/arch/arm/p2m.c b/xen/arch/arm/p2m.c index 852f0d8..aaa43ef 100644 --- a/xen/arch/arm/p2m.c +++ b/xen/arch/arm/p2m.c @@ -29,7 +29,7 @@ void p2m_load_VTTBR(struct domain *d) vttbr |= ((uint64_t)p2m->vmid&0xff)<<48; - WRITE_CP64(vttbr, VTTBR); + WRITE_SYSREG64(vttbr, VTTBR_EL2); isb(); /* Ensure update is visible */ } diff --git a/xen/include/asm-arm/cpregs.h b/xen/include/asm-arm/cpregs.h index a374f5c..676c8cf 100644 --- a/xen/include/asm-arm/cpregs.h +++ b/xen/include/asm-arm/cpregs.h @@ -277,6 +277,7 @@ #define VBAR_EL1 VBAR #define VBAR_EL2 HVBAR #define VTCR_EL2 VTCR +#define VTTBR_EL2 VTTBR #endif -- 1.7.2.5
Ian Campbell
2013-Feb-14 16:47 UTC
[PATCH V2 36/46] xen: arm: Use 64-bit compatible registers in vtimer.
Also, don''t crash the host if we fail to emulate a vtimer access, just kill the guest. Signed-off-by: Ian Campbell <ian.campbell@citrix.com> Acked-by: Tim Deegan <tim@xen.org> --- xen/arch/arm/traps.c | 14 ++++++++++++-- xen/arch/arm/vtimer.c | 23 +++++++++++++---------- 2 files changed, 25 insertions(+), 12 deletions(-) diff --git a/xen/arch/arm/traps.c b/xen/arch/arm/traps.c index 642b0ea..20d2db9 100644 --- a/xen/arch/arm/traps.c +++ b/xen/arch/arm/traps.c @@ -712,7 +712,12 @@ static void do_cp15_32(struct cpu_user_regs *regs, break; case HSR_CPREG32(CNTP_CTL): case HSR_CPREG32(CNTP_TVAL): - BUG_ON(!vtimer_emulate(regs, hsr)); + if ( !vtimer_emulate(regs, hsr) ) + { + dprintk(XENLOG_ERR, + "failed emulation of 32-bit vtimer CP register access\n"); + domain_crash_synchronous(); + } break; default: printk("%s p15, %d, r%d, cr%d, cr%d, %d @ 0x%"PRIregister"\n", @@ -742,7 +747,12 @@ static void do_cp15_64(struct cpu_user_regs *regs, switch ( hsr.bits & HSR_CP64_REGS_MASK ) { case HSR_CPREG64(CNTPCT): - BUG_ON(!vtimer_emulate(regs, hsr)); + if ( !vtimer_emulate(regs, hsr) ) + { + dprintk(XENLOG_ERR, + "failed emulation of 64-bit vtimer CP register access\n"); + domain_crash_synchronous(); + } break; default: printk("%s p15, %d, r%d, r%d, cr%d @ 0x%"PRIregister"\n", diff --git a/xen/arch/arm/vtimer.c b/xen/arch/arm/vtimer.c index 291b87e..0051ff7 100644 --- a/xen/arch/arm/vtimer.c +++ b/xen/arch/arm/vtimer.c @@ -42,7 +42,7 @@ static void virt_timer_expired(void *data) struct vtimer *t = data; vcpu_wake(t->v); } - + int vcpu_vtimer_init(struct vcpu *v) { struct vtimer *t = &v->arch.phys_timer; @@ -57,7 +57,7 @@ int vcpu_vtimer_init(struct vcpu *v) t = &v->arch.virt_timer; init_timer(&t->timer, virt_timer_expired, t, smp_processor_id()); t->ctl = 0; - t->offset = READ_CP64(CNTVCT) + READ_CP64(CNTVOFF); + t->offset = READ_SYSREG64(CNTVCT_EL0) + READ_SYSREG64(CNTVOFF_EL2); t->cval = 0; t->irq = 27; t->v = v; @@ -73,9 +73,9 @@ void vcpu_timer_destroy(struct vcpu *v) int 
virt_timer_save(struct vcpu *v) { - v->arch.virt_timer.ctl = READ_CP32(CNTV_CTL); - WRITE_CP32(v->arch.virt_timer.ctl & ~CNTx_CTL_ENABLE, CNTV_CTL); - v->arch.virt_timer.cval = READ_CP64(CNTV_CVAL); + v->arch.virt_timer.ctl = READ_SYSREG32(CNTV_CTL_EL0); + WRITE_SYSREG32(v->arch.virt_timer.ctl & ~CNTx_CTL_ENABLE, CNTV_CTL_EL0); + v->arch.virt_timer.cval = READ_SYSREG64(CNTV_CVAL_EL0); if ( v->arch.virt_timer.ctl & CNTx_CTL_ENABLE ) { set_timer(&v->arch.virt_timer.timer, ticks_to_ns(v->arch.virt_timer.cval + @@ -88,13 +88,13 @@ int virt_timer_restore(struct vcpu *v) { stop_timer(&v->arch.virt_timer.timer); - WRITE_CP32(v->arch.virt_timer.ctl & ~CNTx_CTL_ENABLE, CNTV_CTL); - WRITE_CP64(v->arch.virt_timer.offset, CNTVOFF); - WRITE_CP64(v->arch.virt_timer.cval, CNTV_CVAL); - WRITE_CP32(v->arch.virt_timer.ctl, CNTV_CTL); + WRITE_SYSREG32(v->arch.virt_timer.ctl & ~CNTx_CTL_ENABLE, CNTV_CTL_EL0); + WRITE_SYSREG64(v->arch.virt_timer.offset, CNTVOFF_EL2); + WRITE_SYSREG64(v->arch.virt_timer.cval, CNTV_CVAL_EL0); + WRITE_SYSREG32(v->arch.virt_timer.ctl, CNTV_CTL_EL0); return 0; } - + static int vtimer_emulate_32(struct cpu_user_regs *regs, union hsr hsr) { struct vcpu *v = current; @@ -180,6 +180,9 @@ static int vtimer_emulate_64(struct cpu_user_regs *regs, union hsr hsr) int vtimer_emulate(struct cpu_user_regs *regs, union hsr hsr) { + if ( !is_pv32_domain(current->domain) ) + return -EINVAL; + switch (hsr.ec) { case HSR_EC_CP15_32: return vtimer_emulate_32(regs, hsr); -- 1.7.2.5
Ian Campbell
2013-Feb-14 16:47 UTC
[PATCH V2 37/46] xen: arm: select_user_reg support for 64-bit hypervisor
Signed-off-by: Ian Campbell <ian.campbell@citrix.com> Acked-by: Tim Deegan <tim@xen.org> --- xen/arch/arm/traps.c | 10 ++++++++++ 1 files changed, 10 insertions(+), 0 deletions(-) diff --git a/xen/arch/arm/traps.c b/xen/arch/arm/traps.c index 20d2db9..b2b9327 100644 --- a/xen/arch/arm/traps.c +++ b/xen/arch/arm/traps.c @@ -72,6 +72,7 @@ register_t *select_user_reg(struct cpu_user_regs *regs, int reg) { BUG_ON( !guest_mode(regs) ); +#ifdef CONFIG_ARM_32 /* * We rely heavily on the layout of cpu_user_regs to avoid having * to handle all of the registers individually. Use BUILD_BUG_ON to @@ -124,6 +125,15 @@ register_t *select_user_reg(struct cpu_user_regs *regs, int reg) BUG(); } #undef REGOFFS +#else + /* In 64 bit the syndrome register contains the AArch64 register + * number even if the trap was from AArch32 mode. Except that + * AArch32 R15 (PC) is encoded as 0b11111. + */ + if ( reg == 0x1f /* && is aarch32 guest */) + return ®s->pc; + return ®s->x0 + reg; +#endif } static const char *decode_fsc(uint32_t fsc, int *level) -- 1.7.2.5
Ian Campbell
2013-Feb-14 16:47 UTC
[PATCH V2 38/46] xen: arm: handle 32-bit guest CP register traps on 64-bit hypervisor
Signed-off-by: Ian Campbell <ian.campbell@citrix.com> Acked-by: Tim Deegan <tim@xen.org> --- xen/arch/arm/traps.c | 10 +++++++--- 1 files changed, 7 insertions(+), 3 deletions(-) diff --git a/xen/arch/arm/traps.c b/xen/arch/arm/traps.c index b2b9327..1e64be1 100644 --- a/xen/arch/arm/traps.c +++ b/xen/arch/arm/traps.c @@ -700,16 +700,16 @@ static void do_cp15_32(struct cpu_user_regs *regs, "attempt to write to read-only register CLIDR\n"); domain_crash_synchronous(); } - *r = READ_CP32(CLIDR); + *r = READ_SYSREG32(CLIDR_EL1); break; case HSR_CPREG32(CCSIDR): if ( !cp32.read ) { dprintk(XENLOG_ERR, - "attempt to write to read-only register CSSIDR\n"); + "attempt to write to read-only register CCSIDR\n"); domain_crash_synchronous(); } - *r = READ_CP32(CCSIDR); + *r = READ_SYSREG32(CCSIDR_EL1); break; case HSR_CPREG32(DCCISW): if ( cp32.read ) @@ -718,7 +718,11 @@ static void do_cp15_32(struct cpu_user_regs *regs, "attempt to read from write-only register DCCISW\n"); domain_crash_synchronous(); } +#ifdef CONFIG_ARM_32 WRITE_CP32(*r, DCCISW); +#else + asm volatile("dc cisw, %0;" : : "r" (*r) : "memory"); +#endif break; case HSR_CPREG32(CNTP_CTL): case HSR_CPREG32(CNTP_TVAL): -- 1.7.2.5
Ian Campbell
2013-Feb-14 16:47 UTC
[PATCH V2 39/46] xen: arm: guest stage 1 walks on 64-bit hypervisor
Still only supports non-LPAE 32-bit guests. Signed-off-by: Ian Campbell <ian.campbell@citrix.com> Acked-by: Tim Deegan <tim@xen.org> --- xen/arch/arm/traps.c | 4 ++-- 1 files changed, 2 insertions(+), 2 deletions(-) diff --git a/xen/arch/arm/traps.c b/xen/arch/arm/traps.c index 1e64be1..e00fef0 100644 --- a/xen/arch/arm/traps.c +++ b/xen/arch/arm/traps.c @@ -780,8 +780,8 @@ static void do_cp15_64(struct cpu_user_regs *regs, void dump_guest_s1_walk(struct domain *d, vaddr_t addr) { - uint32_t ttbcr = READ_CP32(TTBCR); - uint64_t ttbr0 = READ_CP64(TTBR0); + uint32_t ttbcr = READ_SYSREG32(TCR_EL1); + uint64_t ttbr0 = READ_SYSREG64(TTBR0_EL1); paddr_t paddr; uint32_t offset; uint32_t *first = NULL, *second = NULL; -- 1.7.2.5
Ian Campbell
2013-Feb-14 16:47 UTC
[PATCH V2 40/46] xen: arm: implement do_multicall_call for both 32 and 64-bit
Obviously nothing is actually making multicalls even on 32-bit so this isn''t tested. Signed-off-by: Ian Campbell <ian.campbell@citrix.com> Acked-by: Tim Deegan <tim@xen.org> --- xen/arch/arm/traps.c | 22 ++++++++++++++++++++++ xen/include/asm-arm/multicall.h | 11 +---------- 2 files changed, 23 insertions(+), 10 deletions(-) diff --git a/xen/arch/arm/traps.c b/xen/arch/arm/traps.c index e00fef0..5f9c785 100644 --- a/xen/arch/arm/traps.c +++ b/xen/arch/arm/traps.c @@ -675,6 +675,28 @@ static void do_trap_hypercall(struct cpu_user_regs *regs, unsigned long iss) #endif } +void do_multicall_call(struct multicall_entry *multi) +{ + arm_hypercall_fn_t call = NULL; + + if ( multi->op >= ARRAY_SIZE(arm_hypercall_table) ) + { + multi->result = -ENOSYS; + return; + } + + call = arm_hypercall_table[multi->op].fn; + if ( call == NULL ) + { + multi->result = -ENOSYS; + return; + } + + multi->result = call(multi->args[0], multi->args[1], + multi->args[2], multi->args[3], + multi->args[4]); +} + static void do_cp15_32(struct cpu_user_regs *regs, union hsr hsr) { diff --git a/xen/include/asm-arm/multicall.h b/xen/include/asm-arm/multicall.h index c800940..f717b51 100644 --- a/xen/include/asm-arm/multicall.h +++ b/xen/include/asm-arm/multicall.h @@ -1,16 +1,7 @@ #ifndef __ASM_ARM_MULTICALL_H__ #define __ASM_ARM_MULTICALL_H__ -#define do_multicall_call(_call) \ - do { \ - __asm__ __volatile__ ( \ - ".word 0xe7f000f0@; do_multicall_call\n" \ - " mov r0,#0; @ do_multicall_call\n" \ - " str r0, [r0];\n" \ - : \ - : \ - : ); \ - } while ( 0 ) +extern void do_multicall_call(struct multicall_entry *call); #endif /* __ASM_ARM_MULTICALL_H__ */ /* -- 1.7.2.5
Ian Campbell
2013-Feb-14 16:47 UTC
[PATCH V2 41/46] xen: arm: Enable VFP is a nop on 64-bit.
Signed-off-by: Ian Campbell <ian.campbell@citrix.com> Acked-by: Tim Deegan <tim@xen.org> --- xen/include/asm-arm/vfp.h | 10 +++++++++- 1 files changed, 9 insertions(+), 1 deletions(-) diff --git a/xen/include/asm-arm/vfp.h b/xen/include/asm-arm/vfp.h index 0bab2a8..5c61376 100644 --- a/xen/include/asm-arm/vfp.h +++ b/xen/include/asm-arm/vfp.h @@ -3,6 +3,9 @@ #include <xen/types.h> + +#ifdef CONFIG_ARM_32 + #define FPEXC_EN (1u << 30) /* Save and restore FP state. @@ -17,12 +20,17 @@ asm volatile ("fmxr fp" #reg ", %0" : : "r" (val)); \ } while (0) - /* Start-of-day: Turn on VFP */ static inline void enable_vfp(void) { WRITE_FP(exc, READ_FP(exc) | FPEXC_EN); } +#else +static inline void enable_vfp(void) +{ + /* Always enable on 64-bit */ +} +#endif #endif /* -- 1.7.2.5
Ian Campbell
2013-Feb-14 16:47 UTC
[PATCH V2 42/46] xen: arm: Use generic mem{cpy, move, set, zero} on 64-bit
No optimised versions are available in Linux yet (meaning I couldn''t copy them). Signed-off-by: Ian Campbell <ian.campbell@citrix.com> Acked-by: Tim Deegan <tim@xen.org> --- xen/include/asm-arm/string.h | 3 +++ 1 files changed, 3 insertions(+), 0 deletions(-) diff --git a/xen/include/asm-arm/string.h b/xen/include/asm-arm/string.h index f2d643d..e5d1e7e 100644 --- a/xen/include/asm-arm/string.h +++ b/xen/include/asm-arm/string.h @@ -3,6 +3,7 @@ #include <xen/config.h> +#if defined(CONFIG_ARM_32) #define __HAVE_ARCH_MEMCPY extern void * memcpy(void *, const void *, __kernel_size_t); @@ -27,6 +28,8 @@ extern void __memzero(void *ptr, __kernel_size_t n); (__p); \ }) +#endif + #endif /* __ARM_STRING_H__ */ /* * Local variables: -- 1.7.2.5
Ian Campbell
2013-Feb-14 16:47 UTC
[PATCH V2 43/46] xen: arm: Explicitly setup VPIDR & VMPIDR at start of day
These are supposed to reset to the value of the underlying hardware but appears not to be on at least some v8 models. There''s no harm in setting them explicitly. Signed-off-by: Ian Campbell <ian.campbell@citrix.com> Acked-by: Tim Deegan <tim@xen.org> --- xen/arch/arm/setup.c | 5 +++++ xen/include/asm-arm/cpregs.h | 6 ++++++ 2 files changed, 11 insertions(+), 0 deletions(-) diff --git a/xen/arch/arm/setup.c b/xen/arch/arm/setup.c index 299848e..94e9754 100644 --- a/xen/arch/arm/setup.c +++ b/xen/arch/arm/setup.c @@ -56,6 +56,11 @@ static void __init init_idle_domain(void) static void __init processor_id(void) { + + /* Setup the virtual ID to match the physical */ + WRITE_SYSREG32(READ_SYSREG32(MIDR_EL1), VPIDR_EL2); + WRITE_SYSREG(READ_SYSREG(MPIDR_EL1), VMPIDR_EL2); + #if defined(CONFIG_ARM_64) printk("64-bit Processor Features: %016"PRIx64" %016"PRIx64"\n", READ_SYSREG64(ID_AA64PFR0_EL1), READ_SYSREG64(ID_AA64PFR1_EL1)); diff --git a/xen/include/asm-arm/cpregs.h b/xen/include/asm-arm/cpregs.h index 676c8cf..908aad9 100644 --- a/xen/include/asm-arm/cpregs.h +++ b/xen/include/asm-arm/cpregs.h @@ -95,6 +95,8 @@ #define CCSIDR p15,1,c0,c0,0 /* Cache Size ID Registers */ #define CLIDR p15,1,c0,c0,1 /* Cache Level ID Register */ #define CSSELR p15,2,c0,c0,0 /* Cache Size Selection Register */ +#define VPIDR p15,4,c0,c0,0 /* Virtualization Processor ID Register */ +#define VMPIDR p15,4,c0,c0,5 /* Virtualization Multiprocessor ID Register */ /* CP15 CR1: System Control Registers */ #define SCTLR p15,0,c1,c0,0 /* System Control Register */ @@ -278,6 +280,10 @@ #define VBAR_EL2 HVBAR #define VTCR_EL2 VTCR #define VTTBR_EL2 VTTBR +#define MIDR_EL1 MIDR +#define VPIDR_EL2 VPIDR +#define MPIDR_EL1 MPIDR +#define VMPIDR_EL2 VMPIDR #endif -- 1.7.2.5
Ian Campbell
2013-Feb-14 16:47 UTC
[PATCH V2 44/46] xen: arm: print arm64 not arm32 in xen info when appropriate.
Signed-off-by: Ian Campbell <ian.campbell@citrix.com> --- xen/arch/arm/traps.c | 7 ++++++- 1 files changed, 6 insertions(+), 1 deletions(-) diff --git a/xen/arch/arm/traps.c b/xen/arch/arm/traps.c index 5f9c785..52af819 100644 --- a/xen/arch/arm/traps.c +++ b/xen/arch/arm/traps.c @@ -63,8 +63,13 @@ static void print_xen_info(void) { char taint_str[TAINT_STRING_MAX_LEN]; - printk("----[ Xen-%d.%d%s arm32 debug=%c %s ]----\n", + printk("----[ Xen-%d.%d%s %s debug=%c %s ]----\n", xen_major_version(), xen_minor_version(), xen_extra_version(), +#ifdef CONFIG_ARM_32 + "arm32", +#else + "arm64", +#endif debug_build() ? ''y'' : ''n'', print_tainted(taint_str)); } -- 1.7.2.5
Need to check for the 64-bit EL2 modes, not 32-bit HYP mode. Signed-off-by: Ian Campbell <ian.campbell@citrix.com> --- xen/include/asm-arm/regs.h | 8 +++++++- 1 files changed, 7 insertions(+), 1 deletions(-) diff --git a/xen/include/asm-arm/regs.h b/xen/include/asm-arm/regs.h index a723f92..6bfab38 100644 --- a/xen/include/asm-arm/regs.h +++ b/xen/include/asm-arm/regs.h @@ -13,10 +13,16 @@ #define svc_mode(r) psr_mode((r)->cpsr,PSR_MODE_SVC) #define mon_mode(r) psr_mode((r)->cpsr,PSR_MODE_MON) #define abt_mode(r) psr_mode((r)->cpsr,PSR_MODE_ABT) -#define hyp_mode(r) psr_mode((r)->cpsr,PSR_MODE_HYP) #define und_mode(r) psr_mode((r)->cpsr,PSR_MODE_UND) #define sys_mode(r) psr_mode((r)->cpsr,PSR_MODE_SYS) +#ifdef CONFIG_ARM_32 +#define hyp_mode(r) psr_mode((r)->cpsr,PSR_MODE_HYP) +#else +#define hyp_mode(r) (psr_mode((r)->cpsr,PSR_MODE_EL2h) || \ + psr_mode((r)->cpsr,PSR_MODE_EL2t)) +#endif + #define guest_mode(r) \ ({ \ unsigned long diff = (char *)guest_cpu_user_regs() - (char *)(r); \ -- 1.7.2.5
Ian Campbell
2013-Feb-14 16:48 UTC
[PATCH V2 46/46] xen: arm: skanky "appended kernel" option
I''m using this with the ARMv8 Foundation model: ./Foundation_v8pkg/Foundation_v8 \ --image xen-unstable/xen/xen-arm64 \ --data flash.img@0x80400000 where flash.img is a zImage (what you would put in Flash in the VE models) (disabled by default edit config.h to enable) Mostly throwing this out there in case others find it useful. Signed-off-by: Ian Campbell <ian.campbell@citrix.com> --- xen/arch/arm/setup.c | 14 ++++++++++++++ xen/include/asm-arm/config.h | 2 ++ 2 files changed, 16 insertions(+), 0 deletions(-) diff --git a/xen/arch/arm/setup.c b/xen/arch/arm/setup.c index 94e9754..967a8d4 100644 --- a/xen/arch/arm/setup.c +++ b/xen/arch/arm/setup.c @@ -368,6 +368,20 @@ void __init start_xen(unsigned long boot_phys_offset, + (fdt_paddr & ((1 << SECOND_SHIFT) - 1)); fdt_size = device_tree_early_init(fdt); +#ifdef CONFIG_KERNEL_APPEND + early_info.modules.module[1].start = boot_phys_offset + (uintptr_t)_end; + early_info.modules.module[1].start += (2<<20)-1; + early_info.modules.module[1].start &= ~((2<<20)-1); + + early_info.modules.module[1].size = 4<<20; + early_info.modules.nr_mods = 1; + early_printk("assuming kernel is appended at " + "%"PRIpaddr"-%"PRIpaddr"\n", + early_info.modules.module[1].start, + early_info.modules.module[1].start + + early_info.modules.module[1].size); +#endif + cpus = smp_get_max_cpus(); cmdline_parse(device_tree_bootargs(fdt)); diff --git a/xen/include/asm-arm/config.h b/xen/include/asm-arm/config.h index add70bd..d02ef6c 100644 --- a/xen/include/asm-arm/config.h +++ b/xen/include/asm-arm/config.h @@ -34,6 +34,8 @@ #define CONFIG_DOMAIN_PAGE 1 +//#define CONFIG_KERNEL_APPEND 1 + #define OPT_CONSOLE_STR "com1" #ifdef MAX_PHYS_CPUS -- 1.7.2.5
> I am building the 64-bit hypervisor with the Linaro gcc, > gcc-linaro-aarch64-linux-gnu-4.7-2012.12-20121214_linux, from > http://www.linaro.org/engineering/armv8#tab3 > http://releases.linaro.org/13.01/components/toolchain/binaries/gcc-linaro-aarch64-linux-gnu-4.7-2013.01-20130125_linux.tar.bz2

I forgot to mention that for the DTB I am using arch/arm/boot/dts/vexpress-v2p-ca15-tc1.dtb built by the kernel tree, passing it to the Xen build on the make command line: make -C xen XEN_TARGET_ARCH=arm64 CONFIG_DTB_FILE=/home/ianc/vexpress-v2p-ca15-tc1-linux.dtb debug=y CROSS_COMPILE=aarch64-linux-gnu- -j12 -s install Xen is passed to the model directly as the image to boot (no bootwrapper) The dom0 kernel is passed to the hypervisor in the flash "-C motherboard.flashloader0.fname=zImage". The arm64 boot-wrapper is not yet advanced enough to be ported to the scheme used in "xen: arm: parse modules from DT during early boot." per http://lists.xen.org/archives/html/xen-devel/2013-01/msg02469.html For the guest I use arch/arm/boot/xenvm-4.2.dtb and append it to the zImage (the kernel config has CONFIG_ARM_APPENDED_DTB). Ian.
On Thu, 2013-02-14 at 16:47 +0000, Ian Campbell wrote:> You can also run 32-bit on the V8 model (using -C > cluster.cpu0.CONFIG64=0) if you comment out the ThumbEE in > ctxt_switch_from and ctxt_switch_to (making this dynamic is on my TODO > list).8<----------------------------------------------- From e45c4e4f45e72e404052629c619af8810dadd76f Mon Sep 17 00:00:00 2001 From: Ian Campbell <ian.campbell@citrix.com> Date: Fri, 15 Feb 2013 10:30:48 +0000 Subject: [PATCH] xen: arm: implement cpuinfo Use to: - Only context switch ThumbEE state if the processor implements it. In particular the ARMv8 FastModels do not. - Detect the generic timer, and therefore call identify_cpu before init_xen_time. Also improve the boot time messages a bit. I haven''t added decoding for all of the CPUID words, it seems like overkill for the moment. Signed-off-by: Ian Campbell <ian.campbell@citrix.com> Cc: tim@xen.org Cc: stefano.stabellini@citrix.com --- xen/arch/arm/Makefile | 1 + xen/arch/arm/cpu.c | 69 +++++++++++++++++++++ xen/arch/arm/domain.c | 39 ++++++++++--- xen/arch/arm/setup.c | 109 +++++++++++++++++++++++++--------- xen/arch/arm/smpboot.c | 7 ++ xen/arch/arm/time.c | 5 +- xen/include/asm-arm/cpregs.h | 11 ++-- xen/include/asm-arm/cpufeature.h | 40 +++++++++++++ xen/include/asm-arm/domain.h | 10 +++- xen/include/asm-arm/processor.h | 121 ++++++++++++++++++++++++++++++++++++- 10 files changed, 364 insertions(+), 48 deletions(-) create mode 100644 xen/arch/arm/cpu.c create mode 100644 xen/include/asm-arm/cpufeature.h diff --git a/xen/arch/arm/Makefile b/xen/arch/arm/Makefile index 7ff67c7..a43e7c9 100644 --- a/xen/arch/arm/Makefile +++ b/xen/arch/arm/Makefile @@ -2,6 +2,7 @@ subdir-$(arm32) += arm32 subdir-$(arm64) += arm64 obj-y += early_printk.o +obj-y += cpu.o obj-y += domain.o obj-y += domctl.o obj-y += sysctl.o diff --git a/xen/arch/arm/cpu.c b/xen/arch/arm/cpu.c new file mode 100644 index 0000000..7a8ad33 --- /dev/null +++ b/xen/arch/arm/cpu.c @@ -0,0 +1,69 @@ +/* + * This 
program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include <xen/config.h> +#include <xen/init.h> +#include <xen/lib.h> +#include <xen/sched.h> + +#include <asm/processor.h> + +void __cpuinit identify_cpu(struct cpuinfo_arm *c) +{ + c->midr.bits = READ_SYSREG32(MIDR_EL1); + c->mpidr.bits = READ_SYSREG(MPIDR_EL1); + +#ifdef CONFIG_ARM_64 + c->pfr64.bits[0] = READ_SYSREG64(ID_AA64PFR0_EL1); + c->pfr64.bits[1] = READ_SYSREG64(ID_AA64PFR1_EL1); + + c->dbg64.bits[0] = READ_SYSREG64(ID_AA64DFR0_EL1); + c->dbg64.bits[1] = READ_SYSREG64(ID_AA64DFR1_EL1); + + c->aux64.bits[0] = READ_SYSREG64(ID_AA64AFR0_EL1); + c->aux64.bits[1] = READ_SYSREG64(ID_AA64AFR1_EL1); + + c->mm64.bits[0] = READ_SYSREG64(ID_AA64MMFR0_EL1); + c->mm64.bits[1] = READ_SYSREG64(ID_AA64MMFR1_EL1); + + c->isa64.bits[0] = READ_SYSREG64(ID_AA64ISAR0_EL1); + c->isa64.bits[1] = READ_SYSREG64(ID_AA64ISAR1_EL1); +#endif + + c->pfr32.bits[0] = READ_SYSREG32(ID_PFR0_EL1); + c->pfr32.bits[1] = READ_SYSREG32(ID_PFR1_EL1); + + c->dbg32.bits[0] = READ_SYSREG32(ID_DFR0_EL1); + + c->aux32.bits[0] = READ_SYSREG32(ID_AFR0_EL1); + + c->mm32.bits[0] = READ_SYSREG32(ID_MMFR0_EL1); + c->mm32.bits[1] = READ_SYSREG32(ID_MMFR1_EL1); + c->mm32.bits[2] = READ_SYSREG32(ID_MMFR2_EL1); + c->mm32.bits[3] = READ_SYSREG32(ID_MMFR3_EL1); + + c->isa32.bits[0] = READ_SYSREG32(ID_ISAR0_EL1); + c->isa32.bits[1] = READ_SYSREG32(ID_ISAR1_EL1); + c->isa32.bits[2] = READ_SYSREG32(ID_ISAR2_EL1); + c->isa32.bits[3] = READ_SYSREG32(ID_ISAR3_EL1); + c->isa32.bits[4] = READ_SYSREG32(ID_ISAR4_EL1); + 
c->isa32.bits[5] = READ_SYSREG32(ID_ISAR5_EL1); +} + +/* + * Local variables: + * mode: C + * c-set-style: "BSD" + * c-basic-offset: 4 + * indent-tabs-mode: nil + * End: + */ diff --git a/xen/arch/arm/domain.c b/xen/arch/arm/domain.c index 494bed6..de1d837 100644 --- a/xen/arch/arm/domain.c +++ b/xen/arch/arm/domain.c @@ -1,3 +1,14 @@ +/* + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ #include <xen/config.h> #include <xen/init.h> #include <xen/lib.h> @@ -13,6 +24,7 @@ #include <asm/regs.h> #include <asm/p2m.h> #include <asm/irq.h> +#include <asm/cpufeature.h> #include <asm/gic.h> #include "vtimer.h" @@ -58,11 +70,13 @@ static void ctxt_switch_from(struct vcpu *p) /* Arch timer */ virt_timer_save(p); -#if defined(CONFIG_ARM_32x) - /* XXX only save these if ThumbEE e.g. 
ID_PFR0.THUMB_EE_SUPPORT */ - p->arch.teecr = READ_CP32(TEECR); - p->arch.teehbr = READ_CP32(TEEHBR); + if ( is_pv32_domain(p->domain) && cpu_has_thumbee ) + { + p->arch.teecr = READ_SYSREG32(TEECR32_EL1); + p->arch.teehbr = READ_SYSREG32(TEEHBR32_EL1); + } +#ifdef CONFIG_ARM_32 p->arch.joscr = READ_CP32(JOSCR); p->arch.jmcr = READ_CP32(JMCR); #endif @@ -121,6 +135,9 @@ static void ctxt_switch_to(struct vcpu *n) p2m_load_VTTBR(n->domain); isb(); + WRITE_SYSREG32(n->domain->arch.vpidr, VPIDR_EL2); + WRITE_SYSREG(n->domain->arch.vmpidr, VMPIDR_EL2); + /* VGIC */ gic_restore_state(n); @@ -169,11 +186,13 @@ static void ctxt_switch_to(struct vcpu *n) WRITE_SYSREG(n->arch.tpidrro_el0, TPIDRRO_EL0); WRITE_SYSREG(n->arch.tpidr_el1, TPIDR_EL1); -#if defined(CONFIG_ARM_32x) - /* XXX only restore these if ThumbEE e.g. ID_PFR0.THUMB_EE_SUPPORT */ - WRITE_CP32(n->arch.teecr, TEECR); - WRITE_CP32(n->arch.teehbr, TEEHBR); + if ( is_pv32_domain(n->domain) && cpu_has_thumbee ) + { + WRITE_SYSREG32(n->arch.teecr, TEECR32_EL1); + WRITE_SYSREG32(n->arch.teehbr, TEEHBR32_EL1); + } +#ifdef CONFIG_ARM_32 WRITE_CP32(n->arch.joscr, JOSCR); WRITE_CP32(n->arch.jmcr, JMCR); #endif @@ -447,6 +466,10 @@ int arch_domain_create(struct domain *d, unsigned int domcr_flags) if ( (d->shared_info = alloc_xenheap_pages(0, 0)) == NULL ) goto fail; + /* Default the virtual ID to match the physical */ + d->arch.vpidr = boot_cpu_data.midr.bits; + d->arch.vmpidr = boot_cpu_data.mpidr.bits; + clear_page(d->shared_info); share_xen_page_with_guest( virt_to_page(d->shared_info), d, XENSHARE_writable); diff --git a/xen/arch/arm/setup.c b/xen/arch/arm/setup.c index 967a8d4..d13e45d 100644 --- a/xen/arch/arm/setup.c +++ b/xen/arch/arm/setup.c @@ -40,6 +40,9 @@ #include <asm/vfp.h> #include <asm/early_printk.h> #include <asm/gic.h> +#include <asm/cpufeature.h> + +struct cpuinfo_arm __read_mostly boot_cpu_data; static __used void init_done(void) { @@ -54,41 +57,93 @@ static void __init init_idle_domain(void) /* 
TODO: setup_idle_pagetable(); */ } +static const char * __initdata processor_implementers[] = { + [''A''] = "ARM Limited", + [''D''] = "Digital Equipment Corp", + [''M''] = "Motorola, Freescale Semiconductor Inc.", + [''Q''] = "Qualcomm Inc.", + [''V''] = "Marvell Semiconductor Inc.", + [''i''] = "Intel Corporation", +}; + static void __init processor_id(void) { + const char *implementer = "Unknown"; + struct cpuinfo_arm *c = &boot_cpu_data; + + identify_cpu(c); + current_cpu_data = *c; + + if ( c->midr.implementer < ARRAY_SIZE(processor_implementers) && + processor_implementers[c->midr.implementer] ) + implementer = processor_implementers[c->midr.implementer]; - /* Setup the virtual ID to match the physical */ - WRITE_SYSREG32(READ_SYSREG32(MIDR_EL1), VPIDR_EL2); - WRITE_SYSREG(READ_SYSREG(MPIDR_EL1), VMPIDR_EL2); + if ( c->midr.architecture != 0xf ) + printk("Huh, cpu architecture %x, expected 0xf (defined by cpuid)\n", + c->midr.architecture); + + printk("Processor: \"%s\", variant: 0x%x, part 0x%03x, rev 0x%x\n", + implementer, c->midr.variant, c->midr.part_number, c->midr.revision); #if defined(CONFIG_ARM_64) - printk("64-bit Processor Features: %016"PRIx64" %016"PRIx64"\n", - READ_SYSREG64(ID_AA64PFR0_EL1), READ_SYSREG64(ID_AA64PFR1_EL1)); - printk("64-bit Debug Features: %016"PRIx64" %016"PRIx64"\n", - READ_SYSREG64(ID_AA64DFR0_EL1), READ_SYSREG64(ID_AA64DFR1_EL1)); - printk("64-bit Auxiliary Features: %016"PRIx64" %016"PRIx64"\n", - READ_SYSREG64(ID_AA64AFR0_EL1), READ_SYSREG64(ID_AA64AFR1_EL1)); - printk("64-bit Memory Model Features: %016"PRIx64" %016"PRIx64"\n", - READ_SYSREG64(ID_AA64MMFR0_EL1), READ_SYSREG64(ID_AA64MMFR1_EL1)); - printk("64-bit ISA Features: %016"PRIx64" %016"PRIx64"\n", - READ_SYSREG64(ID_AA64ISAR0_EL1), READ_SYSREG64(ID_AA64ISAR1_EL1)); + printk("64-bit Execution:\n"); + printk(" Processor Features: %016"PRIx64" %016"PRIx64"\n", + boot_cpu_data.pfr64.bits[0], boot_cpu_data.pfr64.bits[1]); + printk(" Exception Levels: EL3:%s EL2:%s 
EL1:%s EL0:%s\n", + cpu_has_el3_32 ? "64+32" : cpu_has_el3_64 ? "64" : "No", + cpu_has_el2_32 ? "64+32" : cpu_has_el2_64 ? "64" : "No", + cpu_has_el1_32 ? "64+32" : cpu_has_el1_64 ? "64" : "No", + cpu_has_el0_32 ? "64+32" : cpu_has_el0_64 ? "64" : "No"); + printk(" Extensions:%s%s\n", + cpu_has_fp ? " FloatingPoint" : "", + cpu_has_simd ? " AdvancedSIMD" : ""); + + printk(" Debug Features: %016"PRIx64" %016"PRIx64"\n", + boot_cpu_data.dbg64.bits[0], boot_cpu_data.dbg64.bits[1]); + printk(" Auxiliary Features: %016"PRIx64" %016"PRIx64"\n", + boot_cpu_data.aux64.bits[0], boot_cpu_data.aux64.bits[1]); + printk(" Memory Model Features: %016"PRIx64" %016"PRIx64"\n", + boot_cpu_data.mm64.bits[0], boot_cpu_data.mm64.bits[1]); + printk(" ISA Features: %016"PRIx64" %016"PRIx64"\n", + boot_cpu_data.isa64.bits[0], boot_cpu_data.isa64.bits[1]); #endif + /* * On AArch64 these refer to the capabilities when running in * AArch32 mode. */ - printk("32-bit Processor Features: %08x %08x\n", - READ_SYSREG32(ID_PFR0_EL1), READ_SYSREG32(ID_PFR1_EL1)); - printk("32-bit Debug Features: %08x\n", READ_SYSREG32(ID_DFR0_EL1)); - printk("32-bit Auxiliary Features: %08x\n", READ_SYSREG32(ID_AFR0_EL1)); - printk("32-bit Memory Model Features: %08x %08x %08x %08x\n", - READ_SYSREG32(ID_MMFR0_EL1), READ_SYSREG32(ID_MMFR1_EL1), - READ_SYSREG32(ID_MMFR2_EL1), READ_SYSREG32(ID_MMFR3_EL1)); - printk("32-bit ISA Features: %08x %08x %08x %08x %08x %08x\n", - READ_SYSREG32(ID_ISAR0_EL1), READ_SYSREG32(ID_ISAR1_EL1), - READ_SYSREG32(ID_ISAR2_EL1), READ_SYSREG32(ID_ISAR3_EL1), - READ_SYSREG32(ID_ISAR4_EL1), READ_SYSREG32(ID_ISAR5_EL1)); - + if ( cpu_has_aarch32 ) + { + printk("32-bit Execution:\n"); + printk(" Processor Features: %08"PRIx32":%08"PRIx32"\n", + boot_cpu_data.pfr32.bits[0], boot_cpu_data.pfr32.bits[1]); + printk(" Instruction Sets:%s%s%s%s%s\n", + cpu_has_aarch32 ? " AArch32" : "", + cpu_has_thumb ? " Thumb" : "", + cpu_has_thumb2 ? " Thumb-2" : "", + cpu_has_thumbee ? 
" ThumbEE" : "", + cpu_has_jazelle ? " Jazelle" : ""); + printk(" Extensions:%s%s\n", + cpu_has_gentimer ? " GenericTimer" : "", + cpu_has_security ? " Security" : ""); + + printk(" Debug Features: %08"PRIx32"\n", + boot_cpu_data.dbg32.bits[0]); + printk(" Auxiliary Features: %08"PRIx32"\n", + boot_cpu_data.aux32.bits[0]); + printk(" Memory Model Features: " + "%08"PRIx32" %08"PRIx32" %08"PRIx32" %08"PRIx32"\n", + boot_cpu_data.mm32.bits[0], boot_cpu_data.mm32.bits[1], + boot_cpu_data.mm32.bits[2], boot_cpu_data.mm32.bits[3]); + printk(" ISA Features: %08x %08x %08x %08x %08x %08x\n", + boot_cpu_data.isa32.bits[0], boot_cpu_data.isa32.bits[1], + boot_cpu_data.isa32.bits[2], boot_cpu_data.isa32.bits[3], + boot_cpu_data.isa32.bits[4], boot_cpu_data.isa32.bits[5]); + } + else + { + printk("32-bit Execution: Unsupported\n"); + } } void __init discard_initial_modules(void) @@ -393,6 +448,8 @@ void __init start_xen(unsigned long boot_phys_offset, console_init_preirq(); #endif + processor_id(); + init_xen_time(); gic_init(); @@ -416,8 +473,6 @@ void __init start_xen(unsigned long boot_phys_offset, */ WRITE_SYSREG32(0x80002558, VTCR_EL2); isb(); - processor_id(); - enable_vfp(); softirq_init(); diff --git a/xen/arch/arm/smpboot.c b/xen/arch/arm/smpboot.c index b18f137..cadf79f 100644 --- a/xen/arch/arm/smpboot.c +++ b/xen/arch/arm/smpboot.c @@ -38,6 +38,8 @@ EXPORT_SYMBOL(cpu_online_map); cpumask_t cpu_possible_map; EXPORT_SYMBOL(cpu_possible_map); +struct cpuinfo_arm cpu_data[NR_CPUS]; + /* Fake one node for now. 
See also include/asm-arm/numa.h */ nodemask_t __read_mostly node_online_map = { { [0] = 1UL } }; @@ -136,11 +138,16 @@ void __cpuinit start_secondary(unsigned long boot_phys_offset, unsigned long fdt_paddr, unsigned long cpuid) { + struct cpuinfo_arm *c = cpu_data + cpuid; + memset(get_cpu_info(), 0, sizeof (struct cpu_info)); /* TODO: handle boards where CPUIDs are not contiguous */ set_processor_id(cpuid); + *c = boot_cpu_data; + identify_cpu(c); + /* Setup Hyp vector base */ WRITE_SYSREG((vaddr_t)&hyp_traps_vector, VBAR_EL2); diff --git a/xen/arch/arm/time.c b/xen/arch/arm/time.c index ee92d8c..81d490d 100644 --- a/xen/arch/arm/time.c +++ b/xen/arch/arm/time.c @@ -31,6 +31,7 @@ #include <asm/system.h> #include <asm/time.h> #include <asm/gic.h> +#include <asm/cpufeature.h> /* * Unfortunately the hypervisor timer interrupt appears to be buggy in @@ -90,10 +91,8 @@ static uint32_t calibrate_timer(void) int __init init_xen_time(void) { /* Check that this CPU supports the Generic Timer interface */ -#if defined(CONFIG_ARM_32) - if ( (READ_CP32(ID_PFR1) & ID_PFR1_GT_MASK) != ID_PFR1_GT_v1 ) + if ( !cpu_has_gentimer ) panic("CPU does not support the Generic Timer v1 interface.\n"); -#endif cpu_khz = READ_SYSREG32(CNTFRQ_EL0) / 1000; boot_count = READ_SYSREG64(CNTPCT_EL0); diff --git a/xen/include/asm-arm/cpregs.h b/xen/include/asm-arm/cpregs.h index 908aad9..a72ca62 100644 --- a/xen/include/asm-arm/cpregs.h +++ b/xen/include/asm-arm/cpregs.h @@ -265,10 +265,14 @@ #define ID_PFR0_EL1 ID_PFR0 #define ID_PFR1_EL1 ID_PFR1 #define IFSR32_EL2 IFSR +#define MIDR_EL1 MIDR +#define MPIDR_EL1 MPIDR #define PAR_EL1 PAR #define SCTLR_EL1 SCTLR #define SCTLR_EL2 HSCTLR #define TCR_EL1 TTBCR +#define TEECR32_EL1 TEECR +#define TEEHBR32_EL1 TEEHBR #define TPIDRRO_EL0 TPIDRURO #define TPIDR_EL0 TPIDRURW #define TPIDR_EL1 TPIDRPRW @@ -278,13 +282,10 @@ #define TTBR1_EL1 TTBR1 #define VBAR_EL1 VBAR #define VBAR_EL2 HVBAR +#define VMPIDR_EL2 VMPIDR +#define VPIDR_EL2 VPIDR #define 
VTCR_EL2 VTCR #define VTTBR_EL2 VTTBR -#define MIDR_EL1 MIDR -#define VPIDR_EL2 VPIDR -#define MPIDR_EL1 MPIDR -#define VMPIDR_EL2 VMPIDR - #endif #endif diff --git a/xen/include/asm-arm/cpufeature.h b/xen/include/asm-arm/cpufeature.h new file mode 100644 index 0000000..e633239 --- /dev/null +++ b/xen/include/asm-arm/cpufeature.h @@ -0,0 +1,40 @@ +#ifndef __ASM_ARM_CPUFEATURE_H +#define __ASM_ARM_CPUFEATURE_H + +#ifdef CONFIG_ARM_64 +#define cpu_feature64(c, feat) ((c)->pfr64.feat) +#define boot_cpu_feature64(feat) (boot_cpu_data.pfr64.feat) + +#define cpu_has_el0_32 (boot_cpu_feature64(el0) == 2) +#define cpu_has_el0_64 (boot_cpu_feature64(el0) >= 1) +#define cpu_has_el1_32 (boot_cpu_feature64(el1) == 2) +#define cpu_has_el1_64 (boot_cpu_feature64(el1) >= 1) +#define cpu_has_el2_32 (boot_cpu_feature64(el2) == 2) +#define cpu_has_el2_64 (boot_cpu_feature64(el2) >= 1) +#define cpu_has_el3_32 (boot_cpu_feature64(el3) == 2) +#define cpu_has_el3_64 (boot_cpu_feature64(el3) >= 1) +#define cpu_has_fp (boot_cpu_feature64(fp) == 0) +#define cpu_has_simd (boot_cpu_feature64(simd) == 0) +#endif + +#define cpu_feature32(c, feat) ((c)->pfr32.feat) +#define boot_cpu_feature32(feat) (boot_cpu_data.pfr32.feat) + +#define cpu_has_aarch32 (boot_cpu_feature32(arm) == 1) +#define cpu_has_thumb (boot_cpu_feature32(thumb) >= 1) +#define cpu_has_thumb2 (boot_cpu_feature32(thumb) >= 3) +#define cpu_has_jazelle (boot_cpu_feature32(jazelle) >= 0) +#define cpu_has_thumbee (boot_cpu_feature32(thumbee) == 1) + +#define cpu_has_gentimer (boot_cpu_feature32(gentimer) == 1) +#define cpu_has_security (boot_cpu_feature32(security) > 0) + +#endif +/* + * Local variables: + * mode: C + * c-set-style: "BSD" + * c-basic-offset: 4 + * indent-tabs-mode: nil + * End: + */ diff --git a/xen/include/asm-arm/domain.h b/xen/include/asm-arm/domain.h index 4a4bf2f..601e972 100644 --- a/xen/include/asm-arm/domain.h +++ b/xen/include/asm-arm/domain.h @@ -57,6 +57,10 @@ struct arch_domain struct hvm_domain 
hvm_domain; xen_pfn_t *grant_table_gpfn; + /* Virtual CPUID */ + uint32_t vpidr; + register_t vmpidr; + struct { /* * Covers access to other members of this struct _except_ for @@ -166,8 +170,12 @@ struct arch_vcpu register_t tpidr_el1; register_t tpidrro_el0; + uint32_t teecr, teehbr; /* ThumbEE, 32-bit guests only */ #ifdef CONFIG_ARM_32 - uint32_t teecr, teehbr; + /* + * ARMv8 only supports a trivial implementation on Jazelle when in AArch32 + * mode and therefore has no extended control registers. + */ uint32_t joscr, jmcr; #endif diff --git a/xen/include/asm-arm/processor.h b/xen/include/asm-arm/processor.h index 1072aa2..0515986 100644 --- a/xen/include/asm-arm/processor.h +++ b/xen/include/asm-arm/processor.h @@ -91,6 +91,123 @@ #define HSR_EC_DATA_ABORT_HYP 0x25 #ifndef __ASSEMBLY__ + +#include <xen/types.h> + +struct cpuinfo_arm { + union { + uint32_t bits; + struct { + unsigned long revision:4; + unsigned long part_number:12; + unsigned long architecture:4; + unsigned long variant:4; + unsigned long implementer:8; + }; + } midr; + union { + register_t bits; + struct { + unsigned long aff0:8; + unsigned long aff1:8; + unsigned long aff2:8; + unsigned long mt:1; /* Multi-thread, iff MP == 1 */ + unsigned long __res0:5; + unsigned long up:1; /* UP system, iff MP == 1 */ + unsigned long mp:1; /* MP extensions */ + +#ifdef CONFIG_ARM_64 + unsigned long aff3:8; + unsigned long __res1:24; +#endif + }; + } mpidr; + +#ifdef CONFIG_ARM_64 + /* 64-bit CPUID registers. 
*/ + union { + uint64_t bits[2]; + struct { + unsigned long el0:4; + unsigned long el1:4; + unsigned long el2:4; + unsigned long el3:4; + unsigned long fp:4; /* Floating Point */ + unsigned long simd:4; /* Advanced SIMD */ + unsigned long __res0:8; + + unsigned long __res1; + }; + } pfr64; + + struct { + uint64_t bits[2]; + } dbg64; + + struct { + uint64_t bits[2]; + } aux64; + + struct { + uint64_t bits[2]; + } mm64; + + struct { + uint64_t bits[2]; + } isa64; + +#endif + + /* + * 32-bit CPUID registers. On ARMv8 these describe the properties + * when running in 32-bit mode. + */ + union { + uint32_t bits[2]; + struct { + unsigned long arm:4; + unsigned long thumb:4; + unsigned long jazelle:4; + unsigned long thumbee:4; + unsigned long __res0:16; + + unsigned long progmodel:4; + unsigned long security:4; + unsigned long mprofile:4; + unsigned long virt:4; + unsigned long gentimer:4; + unsigned long __res1:12; + }; + } pfr32; + + struct { + uint32_t bits[1]; + } dbg32; + + struct { + uint32_t bits[1]; + } aux32; + + struct { + uint32_t bits[4]; + } mm32; + + struct { + uint32_t bits[6]; + } isa32; +}; + +/* + * capabilities of CPUs + */ + +extern struct cpuinfo_arm boot_cpu_data; + +extern void identify_cpu(struct cpuinfo_arm *); + +extern struct cpuinfo_arm cpu_data[]; +#define current_cpu_data cpu_data[smp_processor_id()] + union hsr { uint32_t bits; struct { @@ -225,10 +342,6 @@ union hsr { #define CNTx_CTL_MASK (1u<<1) /* Mask IRQ */ #define CNTx_CTL_PENDING (1u<<2) /* IRQ pending */ -/* CPUID bits */ -#define ID_PFR1_GT_MASK 0x000F0000 /* Generic Timer interface support */ -#define ID_PFR1_GT_v1 0x00010000 - #if defined(CONFIG_ARM_32) # include <asm/arm32/processor.h> #elif defined(CONFIG_ARM_64) -- 1.7.2.5
Ian Campbell
2013-Feb-15 13:36 UTC
Re: [PATCH V2 01/46] xen: arm32: Don't bother with the bootloader provided ARM-Linux machine type
On Thu, 2013-02-14 at 16:47 +0000, Ian Campbell wrote:> Everything is DTB based and on 64-bit there is no such concept even in > Linux. > > Signed-off-by: Ian Campbell <ian.campbell@citrix.com>Stefano Acked in <alpine.DEB.2.02.1301241739070.29727@kaball.uk.xensource.com> which I missed picking up here. Applied, thanks.> --- > v2: Update start_secondary too. > --- > xen/arch/arm/arm32/head.S | 7 +++---- > xen/arch/arm/setup.c | 1 - > xen/arch/arm/smpboot.c | 1 - > 3 files changed, 3 insertions(+), 6 deletions(-) > > diff --git a/xen/arch/arm/arm32/head.S b/xen/arch/arm/arm32/head.S > index 20e9da6..92fc36c 100644 > --- a/xen/arch/arm/arm32/head.S > +++ b/xen/arch/arm/arm32/head.S > @@ -72,7 +72,7 @@ past_zImage: > cpsid aif /* Disable all interrupts */ > > /* Save the bootloader arguments in less-clobberable registers */ > - mov r7, r1 /* r7 := ARM-linux machine type */ > + /* No need to save r1 == Unused ARM-linux machine type */ > mov r8, r2 /* r8 := ATAG base address */ > > /* Find out where we are */ > @@ -334,9 +334,8 @@ launch: > add sp, #STACK_SIZE /* (which grows down from the top). 
*/ > sub sp, #CPUINFO_sizeof /* Make room for CPU save record */ > mov r0, r10 /* Marshal args: - phys_offset */ > - mov r1, r7 /* - machine type */ > - mov r2, r8 /* - ATAG address */ > - movs r3, r12 /* - CPU ID */ > + mov r1, r8 /* - ATAG address */ > + movs r2, r12 /* - CPU ID */ > beq start_xen /* and disappear into the land of C */ > b start_secondary /* (to the appropriate entry point) */ > > diff --git a/xen/arch/arm/setup.c b/xen/arch/arm/setup.c > index acb7abb..782d252 100644 > --- a/xen/arch/arm/setup.c > +++ b/xen/arch/arm/setup.c > @@ -329,7 +329,6 @@ void __init setup_cache(void) > > /* C entry point for boot CPU */ > void __init start_xen(unsigned long boot_phys_offset, > - unsigned long arm_type, > unsigned long atag_paddr, > unsigned long cpuid) > { > diff --git a/xen/arch/arm/smpboot.c b/xen/arch/arm/smpboot.c > index c7a586b..da4880c 100644 > --- a/xen/arch/arm/smpboot.c > +++ b/xen/arch/arm/smpboot.c > @@ -132,7 +132,6 @@ make_cpus_ready(unsigned int max_cpus, unsigned long boot_phys_offset) > > /* Boot the current CPU */ > void __cpuinit start_secondary(unsigned long boot_phys_offset, > - unsigned long arm_type, > unsigned long atag_paddr, > unsigned long cpuid) > {
Ian Campbell
2013-Feb-15 13:36 UTC
Re: [PATCH V2 02/46] xen: arm: rename atag_paddr argument fdt_paddr
On Thu, 2013-02-14 at 16:47 +0000, Ian Campbell wrote:> We don't support ATAGs and this is always actually an FDT address. > > Signed-off-by: Ian Campbell <ian.campbell@citrix.com> > Acked-by: Tim Deegan <tim@xen.org>Applied, thanks.
Ian Campbell
2013-Feb-15 13:37 UTC
Re: [PATCH V2 03/46] xen: arm: do not pass a machine ID to dom0
On Thu, 2013-02-14 at 16:47 +0000, Ian Campbell wrote:> Xen relies on DTB and we pass in a suitable device-tree so we don't > need to (and shouldn't) pretend to be a Versatile Express here. > > We already don't pass a machine ID to domU in the same way. > > Signed-off-by: Ian Campbell <ian.campbell@citrix.com> > Acked-by: Tim Deegan <tim@xen.org>Applied, thanks.
Tim Deegan
2013-Feb-21 14:51 UTC
Re: [PATCH V2 04/46] arm: avoid inline asm for dsb, isb, wfi and sev.
At 16:47 +0000 on 14 Feb (1360860438), Ian Campbell wrote:> "dsb" must be written "dsb sy" on arm64. "dsb sy" is also valid (and > synonymous) on arm32 but we have a macro so lets use it. > > Signed-off-by: Ian Campbell <ian.campbell@citrix.com>Stefano's acked this one too. But for good measure, Acked-by: Tim Deegan <tim@xen.org>
Tim Deegan
2013-Feb-21 14:56 UTC
Re: [PATCH V2 05/46] xen: arm64: initial build + config changes, start of day code
At 16:47 +0000 on 14 Feb (1360860439), Ian Campbell wrote:> +2: PRINT("- Started in Hyp mode -\r\n") > + > +hyp:I thought we were going to use "EL3" instead of "Hyp".> + /* Non-boot CPUs need to move on to the relocated pagetables */ > + //mov x0, #0This line should go.> +/* > + * xen/arch/arm/arm64/mode_switch.S > + * > + * Start-of day code to take a CPU from EL3 to EL2. Largely taken from > + bootwrapper.Still missing a *. Tim.
At 16:47 +0000 on 14 Feb (1360860442), Ian Campbell wrote:> Signed-off-by: Ian Campbell <ian.campbell@citrix.com>Acked-by: Tim Deegan <tim@xen.org>
At 16:47 +0000 on 14 Feb (1360860444), Ian Campbell wrote:> Signed-off-by: Ian Campbell <ian.campbell@citrix.com>Acked-by: Tim Deegan <tim@xen.org>
Tim Deegan
2013-Feb-21 15:01 UTC
Re: [PATCH V2 14/46] xen: arm64: barriers and wait for interrupts/events
At 16:47 +0000 on 14 Feb (1360860448), Ian Campbell wrote:> Signed-off-by: Ian Campbell <ian.campbell@citrix.com>Acked-by: Tim Deegan <tim@xen.org> Were we also talking about having smp_ barriers equivalent to the normal ones, like on x86?
Tim Deegan
2013-Feb-21 15:04 UTC
Re: [PATCH V2 19/46] xen: arm64: changes to setup_pagetables and mm.c
At 16:47 +0000 on 14 Feb (1360860453), Ian Campbell wrote:> Signed-off-by: Ian Campbell <ian.campbell@citrix.com>Acked-by: Tim Deegan <tim@xen.org>
Tim Deegan
2013-Feb-21 15:05 UTC
Re: [PATCH V2 25/46] xen: arm64: add guest type to domain field.
At 16:47 +0000 on 14 Feb (1360860459), Ian Campbell wrote:> Currently 32 bit PV is the only option. > > Signed-off-by: Ian Campbell <ian.campbell@citrix.com>Acked-by: Tim Deegan <tim@xen.org>
At 16:47 +0000 on 14 Feb (1360860461), Ian Campbell wrote:> Signed-off-by: Ian Campbell <ian.campbell@citrix.com> > --- > v2: Call leave_hypervisor_tail on exit back to guest, disable interrupts while > restoring state.You don''t seem to have addressed my other comments on v1:> --- a/xen/arch/arm/arm64/Makefile > +++ b/xen/arch/arm/arm64/Makefile > @@ -1,5 +1,7 @@ > subdir-y += lib > > +obj-y += entry.o > obj-y += mode_switch.o > > +obj-y += traps.o > obj-y += domain.oAlphabetical order, please.> +#define __L2(_x) (((_x) & 0x00000002) ? 1 : 0) > +#define __L4(_x) (((_x) & 0x0000000c) ? ( 2 + __L2( (_x)>> 2)) : __L2( _x)) > +#define __L8(_x) (((_x) & 0x000000f0) ? ( 4 + __L4( (_x)>> 4)) : __L4( _x)) > +#define __L16(_x) (((_x) & 0x0000ff00) ? ( 8 + __L8( (_x)>> 8)) : __L8( _x)) > +#define LOG_2(_x) (((_x) & 0xffff0000) ? (16 + __L16((_x)>>16)) : __L16(_x))This is now replicated in three places. Maybe it should live in, say, xen/bitops.h?> --- a/xen/include/asm-arm/processor.h > +++ b/xen/include/asm-arm/processor.h > @@ -238,7 +238,7 @@ union hsr { > #endif > > #ifndef __ASSEMBLY__ > -extern uint32_t hyp_traps_vector[8]; > +extern uint32_t hyp_traps_vector;Keep the array type? uint8_t[] would do, or define up something the right size. Cheers, Tim.
Tim Deegan
2013-Feb-21 15:11 UTC
Re: [PATCH V2 31/46] xen: arm: show_registers() support for 64-bit.
At 16:47 +0000 on 14 Feb (1360860465), Ian Campbell wrote:> Signed-off-by: Ian Campbell <ian.campbell@citrix.com>Acked-by: Tim Deegan <tim@xen.org>
Tim Deegan
2013-Feb-21 15:12 UTC
Re: [PATCH V2 44/46] xen: arm: print arm64 not arm32 in xen info when appropriate.
At 16:47 +0000 on 14 Feb (1360860478), Ian Campbell wrote:> Signed-off-by: Ian Campbell <ian.campbell@citrix.com>Acked-by: Tim Deegan <tim@xen.org>
At 16:47 +0000 on 14 Feb (1360860479), Ian Campbell wrote:> Need to check for the 64-bit EL2 modes, not 32-bit HYP mode. > > Signed-off-by: Ian Campbell <ian.campbell@citrix.com>Acked-by: Tim Deegan <tim@xen.org>
On Thu, 2013-02-21 at 15:10 +0000, Tim Deegan wrote:> At 16:47 +0000 on 14 Feb (1360860461), Ian Campbell wrote: > > Signed-off-by: Ian Campbell <ian.campbell@citrix.com> > > --- > > v2: Call leave_hypervisor_tail on exit back to guest, disable interrupts while > > restoring state. > > You don''t seem to have addressed my other comments on v1:I''ve got them in v3, I noted that I hadn''t addresses you comment on this patch in the #0/46.> > --- a/xen/arch/arm/arm64/Makefile > > +++ b/xen/arch/arm/arm64/Makefile > > @@ -1,5 +1,7 @@ > > subdir-y += lib > > > > +obj-y += entry.o > > obj-y += mode_switch.o > > > > +obj-y += traps.o > > obj-y += domain.o > > Alphabetical order, please.I kept this the same order as arm32/Makefile on purpose.> > +#define __L2(_x) (((_x) & 0x00000002) ? 1 : 0) > > +#define __L4(_x) (((_x) & 0x0000000c) ? ( 2 + __L2( (_x)>> 2)) : __L2( _x)) > > +#define __L8(_x) (((_x) & 0x000000f0) ? ( 4 + __L4( (_x)>> 4)) : __L4( _x)) > > +#define __L16(_x) (((_x) & 0x0000ff00) ? ( 8 + __L8( (_x)>> 8)) : __L8( _x)) > > +#define LOG_2(_x) (((_x) & 0xffff0000) ? (16 + __L16((_x)>>16)) : __L16(_x)) > > This is now replicated in three places. Maybe it should live in, say, > xen/bitops.h?[...]> Keep the array type? uint8_t[] would do, or define up something the > right size.I''ve got both of these in my tree already for v3. Ian.
Ian Campbell
2013-Feb-21 15:26 UTC
Re: [PATCH V2 05/46] xen: arm64: initial build + config changes, start of day code
On Thu, 2013-02-21 at 14:56 +0000, Tim Deegan wrote:> At 16:47 +0000 on 14 Feb (1360860439), Ian Campbell wrote: > > +2: PRINT("- Started in Hyp mode -\r\n") > > + > > +hyp: > > I though we were going to use "EL3" instead of "Hyp".Sorry, looks like I missed a few comments when I went through this one.> > > + /* Non-boot CPUs need to move on to the relocated pagetables */ > > + //mov x0, #0 > > This line should go. > > > +/* > > + * xen/arch/arm/arm64/mode_switch.S > > + * > > + * Start-of day code to take a CPU from EL3 to EL2. Largely taken from > > + bootwrapper. > > Still missing a *. > > Tim.
Ian Campbell
2013-Feb-21 15:27 UTC
Re: [PATCH V2 14/46] xen: arm64: barriers and wait for interrupts/events
On Thu, 2013-02-21 at 15:01 +0000, Tim Deegan wrote:> At 16:47 +0000 on 14 Feb (1360860448), Ian Campbell wrote: > > Signed-off-by: Ian Campbell <ian.campbell@citrix.com> > > Acked-by: Tim Deegan <tim@xen.org> > > Were we also talking about having smp_ barriers equivalent to the normal > ones, like on x86?Yes, I think in an F2F conversation which is why I forgot. Ian.
At 15:25 +0000 on 21 Feb (1361460324), Ian Campbell wrote:> On Thu, 2013-02-21 at 15:10 +0000, Tim Deegan wrote: > > At 16:47 +0000 on 14 Feb (1360860461), Ian Campbell wrote: > > > Signed-off-by: Ian Campbell <ian.campbell@citrix.com> > > > --- > > > v2: Call leave_hypervisor_tail on exit back to guest, disable interrupts while > > > restoring state. > > > > You don''t seem to have addressed my other comments on v1: > > I''ve got them in v3, I noted that I hadn''t addresses you comment on this > patch in the #0/46.So you did; I did read the 0/46, but for some reason all that stuck in my head was the WFE stuff. AFAICS you just need to re-roll this and #25, and get a tools-person to ack #20. So for v3, can you just send those, and avoid another 46-patch mailbomb? :) Cheers, Tim.
On Thu, 2013-02-21 at 15:25 +0000, Ian Campbell wrote:> On Thu, 2013-02-21 at 15:10 +0000, Tim Deegan wrote: > > At 16:47 +0000 on 14 Feb (1360860461), Ian Campbell wrote: > > > Signed-off-by: Ian Campbell <ian.campbell@citrix.com> > > > --- > > > v2: Call leave_hypervisor_tail on exit back to guest, disable interrupts while > > > restoring state. > > > > You don''t seem to have addressed my other comments on v1: > > I''ve got them in v3, I noted that I hadn''t addresses you comment on this > patch in the #0/46.Here is v3. Needs "xen: consolidate implementations of LOG() macro" which I''ve just posted. Ian. 8<-------------------------------------------------- From 6978a03e10316ff997c91ccd6f88be110dfcffec Mon Sep 17 00:00:00 2001 From: Ian Campbell <ian.campbell@citrix.com> Date: Mon, 21 Jan 2013 17:33:31 +0000 Subject: [PATCH] xen: arm: arm64 trap handling. Signed-off-by: Ian Campbell <ian.campbell@citrix.com> --- v3: use bitops.h provided LOG() macro use simple bl instead of preloading lr and b remove an incorrectly placed and inaccurate comment declare hyp_traps_vector as an array, avoiding & on uses v2: Call leave_hypervisor_tail on exit back to guest, disable interrupts while restoring state. 
--- xen/arch/arm/arm64/Makefile | 2 + xen/arch/arm/arm64/asm-offsets.c | 58 +++++++++ xen/arch/arm/arm64/entry.S | 254 ++++++++++++++++++++++++++++++++++++++ xen/arch/arm/arm64/traps.c | 56 +++++++++ xen/arch/arm/smpboot.c | 2 +- xen/arch/arm/traps.c | 17 ++- xen/include/asm-arm/cpregs.h | 1 + xen/include/asm-arm/processor.h | 2 +- 8 files changed, 386 insertions(+), 6 deletions(-) create mode 100644 xen/arch/arm/arm64/asm-offsets.c create mode 100644 xen/arch/arm/arm64/entry.S create mode 100644 xen/arch/arm/arm64/traps.c diff --git a/xen/arch/arm/arm64/Makefile b/xen/arch/arm/arm64/Makefile index 815f305..be41f43 100644 --- a/xen/arch/arm/arm64/Makefile +++ b/xen/arch/arm/arm64/Makefile @@ -1,5 +1,7 @@ subdir-y += lib +obj-y += entry.o obj-y += mode_switch.o +obj-y += traps.o obj-y += domain.o diff --git a/xen/arch/arm/arm64/asm-offsets.c b/xen/arch/arm/arm64/asm-offsets.c new file mode 100644 index 0000000..7949e3e --- /dev/null +++ b/xen/arch/arm/arm64/asm-offsets.c @@ -0,0 +1,58 @@ +/* + * Generate definitions needed by assembly language modules. + * This code generates raw asm output which is post-processed + * to extract and format the required data. 
+ */ +#define COMPILE_OFFSETS + +#include <xen/config.h> +#include <xen/types.h> +#include <xen/sched.h> +#include <xen/bitops.h> +#include <public/xen.h> +#include <asm/current.h> + +#define DEFINE(_sym, _val) \ + __asm__ __volatile__ ( "\n->" #_sym " %0 " #_val : : "i" (_val) ) +#define BLANK() \ + __asm__ __volatile__ ( "\n->" : : ) +#define OFFSET(_sym, _str, _mem) \ + DEFINE(_sym, offsetof(_str, _mem)); + +void __dummy__(void) +{ + OFFSET(UREGS_X0, struct cpu_user_regs, x0); + OFFSET(UREGS_LR, struct cpu_user_regs, lr); + + OFFSET(UREGS_SP, struct cpu_user_regs, sp); + OFFSET(UREGS_PC, struct cpu_user_regs, pc); + OFFSET(UREGS_CPSR, struct cpu_user_regs, cpsr); + + OFFSET(UREGS_SPSR_el1, struct cpu_user_regs, spsr_el1); + + OFFSET(UREGS_SPSR_fiq, struct cpu_user_regs, spsr_fiq); + OFFSET(UREGS_SPSR_irq, struct cpu_user_regs, spsr_irq); + OFFSET(UREGS_SPSR_und, struct cpu_user_regs, spsr_und); + OFFSET(UREGS_SPSR_abt, struct cpu_user_regs, spsr_abt); + + OFFSET(UREGS_SP_el0, struct cpu_user_regs, sp_el0); + OFFSET(UREGS_SP_el1, struct cpu_user_regs, sp_el1); + OFFSET(UREGS_ELR_el1, struct cpu_user_regs, elr_el1); + + OFFSET(UREGS_kernel_sizeof, struct cpu_user_regs, cpsr); + DEFINE(UREGS_user_sizeof, sizeof(struct cpu_user_regs)); + BLANK(); + + DEFINE(CPUINFO_sizeof, sizeof(struct cpu_info)); + + OFFSET(VCPU_arch_saved_context, struct vcpu, arch.saved_context); +} + +/* + * Local variables: + * mode: C + * c-set-style: "BSD" + * c-basic-offset: 4 + * indent-tabs-mode: nil + * End: + */ diff --git a/xen/arch/arm/arm64/entry.S b/xen/arch/arm/arm64/entry.S new file mode 100644 index 0000000..e35b6ea --- /dev/null +++ b/xen/arch/arm/arm64/entry.S @@ -0,0 +1,254 @@ +#include <xen/config.h> +#include <asm/asm_defns.h> +#include <public/xen.h> + +/* + * Register aliases. + */ +lr .req x30 // link register + +/* + * Stack pushing/popping (register pairs only). Equivalent to store decrement + * before, load increment after. 
+ */ + .macro push, xreg1, xreg2 + stp \xreg1, \xreg2, [sp, #-16]! + .endm + + .macro pop, xreg1, xreg2 + ldp \xreg1, \xreg2, [sp], #16 + .endm + +/* + * Save/restore guest mode specific state, outer stack frame + */ + .macro entry_guest, compat + + add x21, sp, #UREGS_SPSR_el1 + mrs x23, SPSR_EL1 + str x23, [x21] + + .if \compat == 0 /* Aarch64 mode */ + + add x21, sp, #UREGS_SP_el0 + mrs x22, SP_el0 + str x22, [x21] + + add x21, sp, #UREGS_ELR_el1 + mrs x22, SP_el1 + mrs x23, ELR_el1 + stp x22, x23, [x21] + + .else /* Aarch32 mode */ + + add x21, sp, #UREGS_SPSR_fiq + mrs x22, spsr_fiq + mrs x23, spsr_irq + stp w22, w23, [x21] + + add x21, sp, #UREGS_SPSR_und + mrs x22, spsr_und + mrs x23, spsr_abt + stp w22, w23, [x21] + + .endif + + .endm + +/* + * Save state on entry to hypervisor + */ + .macro entry, hyp, compat + sub sp, sp, #(UREGS_SPSR_el1 - UREGS_SP) + push x28, x29 + push x26, x27 + push x24, x25 + push x22, x23 + push x20, x21 + push x18, x19 + push x16, x17 + push x14, x15 + push x12, x13 + push x10, x11 + push x8, x9 + push x6, x7 + push x4, x5 + push x2, x3 + push x0, x1 + + .if \hyp == 1 /* Hypervisor mode */ + + add x21, sp, #(UREGS_X0 - UREGS_SP) + + .else /* Guest mode */ + + entry_guest \compat + mov x21, ~0 /* sp only valid for hyp frame XXX */ + + .endif + + stp lr, x21, [sp, #UREGS_LR] + + mrs x22, elr_el2 + mrs x23, spsr_el2 + stp x22, x23, [sp, #UREGS_PC] + + .endm + +/* + * Bad Abort numbers + *----------------- + */ +#define BAD_SYNC 0 +#define BAD_IRQ 1 +#define BAD_FIQ 2 +#define BAD_ERROR 3 + + .macro invalid, reason + mov x0, sp + mov x1, #\reason + b do_bad_mode + .endm + +hyp_sync_invalid: + entry hyp=1 + invalid BAD_SYNC + +hyp_irq_invalid: + entry hyp=1 + invalid BAD_IRQ + +hyp_fiq_invalid: + entry hyp=1 + invalid BAD_FIQ + +hyp_error_invalid: + entry hyp=1 + invalid BAD_ERROR + +/* Traps taken in Current EL with SP_ELx */ +hyp_sync: + entry hyp=1 + msr daifclr, #2 + mov x0, sp + bl do_trap_hypervisor + b return_to_hypervisor + 
+hyp_irq: + entry hyp=1 + mov x0, sp + bl do_trap_irq + b return_to_hypervisor + +guest_sync: + entry hyp=0, compat=0 + invalid BAD_SYNC /* No AArch64 guest support yet */ + +guest_irq: + entry hyp=0, compat=0 + invalid BAD_IRQ /* No AArch64 guest support yet */ + +guest_fiq_invalid: + entry hyp=0, compat=0 + invalid BAD_FIQ + +guest_error_invalid: + entry hyp=0, compat=0 + invalid BAD_ERROR + +guest_sync_compat: + entry hyp=0, compat=1 + msr daifclr, #2 + mov x0, sp + bl do_trap_hypervisor + b return_to_guest + +guest_irq_compat: + entry hyp=0, compat=1 + mov x0, sp + bl do_trap_irq + b return_to_guest + +guest_fiq_invalid_compat: + entry hyp=0, compat=1 + invalid BAD_FIQ + +guest_error_invalid_compat: + entry hyp=0, compat=1 + invalid BAD_ERROR + +ENTRY(return_to_new_vcpu) + ldr x21, [sp, #UREGS_CPSR] + and x21, x21, #PSR_MODE_MASK + /* Returning to EL2? */ + cmp x21, #PSR_MODE_EL2t + ccmp x21, #PSR_MODE_EL2h, #0x4, ne + b.eq return_to_hypervisor /* Yes */ + /* Fall thru */ +ENTRY(return_to_guest) + bl leave_hypervisor_tail /* Disables interrupts on return */ + /* Fall thru */ +ENTRY(return_to_hypervisor) + msr daifset, #2 /* Mask interrupts */ + + ldp x21, x22, [sp, #UREGS_PC] // load ELR, SPSR + + pop x0, x1 + pop x2, x3 + pop x4, x5 + pop x6, x7 + pop x8, x9 + + msr elr_el2, x21 // set up the return data + msr spsr_el2, x22 + + pop x10, x11 + pop x12, x13 + pop x14, x15 + pop x16, x17 + pop x18, x19 + pop x20, x21 + pop x22, x23 + pop x24, x25 + pop x26, x27 + pop x28, x29 + + ldr lr, [sp], #(UREGS_SPSR_el1 - UREGS_SP) + eret + +/* + * Exception vectors. 
+ */ + .macro ventry label + .align 7 + b \label + .endm + + .align 11 +ENTRY(hyp_traps_vector) + ventry hyp_sync_invalid // Synchronous EL2t + ventry hyp_irq_invalid // IRQ EL2t + ventry hyp_fiq_invalid // FIQ EL2t + ventry hyp_error_invalid // Error EL2t + + ventry hyp_sync // Synchronous EL2h + ventry hyp_irq // IRQ EL2h + ventry hyp_fiq_invalid // FIQ EL2h + ventry hyp_error_invalid // Error EL2h + + ventry guest_sync // Synchronous 64-bit EL0/EL1 + ventry guest_irq // IRQ 64-bit EL0/EL1 + ventry guest_fiq_invalid // FIQ 64-bit EL0/EL1 + ventry guest_error_invalid // Error 64-bit EL0/EL1 + + ventry guest_sync_compat // Synchronous 32-bit EL0/EL1 + ventry guest_irq_compat // IRQ 32-bit EL0/EL1 + ventry guest_fiq_invalid_compat // FIQ 32-bit EL0/EL1 + ventry guest_error_invalid_compat // Error 32-bit EL0/EL1 + +/* + * Local variables: + * mode: ASM + * indent-tabs-mode: nil + * End: + */ diff --git a/xen/arch/arm/arm64/traps.c b/xen/arch/arm/arm64/traps.c new file mode 100644 index 0000000..02ef992 --- /dev/null +++ b/xen/arch/arm/arm64/traps.c @@ -0,0 +1,56 @@ +/* + * xen/arch/arm/arm64/traps.c + * + * ARM AArch64 Specific Trap handlers + * + * Copyright (c) 2012 Citrix Systems. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#include <xen/config.h> +#include <xen/lib.h> + +#include <asm/system.h> +#include <asm/processor.h> + +#include <public/xen.h> + +asmlinkage void do_trap_serror(struct cpu_user_regs *regs) +{ + panic("Unhandled serror trap\n"); +} + +static const char *handler[]= { + "Synchronous Abort", + "IRQ", + "FIQ", + "Error" +}; + +asmlinkage void do_bad_mode(struct cpu_user_regs *regs, int reason) +{ + uint64_t esr = READ_SYSREG64(ESR_EL2); + printk("Bad mode in %s handler detected, code 0x%08"PRIx64"\n", + handler[reason], esr); + + local_irq_disable(); + panic("bad mode"); +} + +/* + * Local variables: + * mode: C + * c-set-style: "BSD" + * c-basic-offset: 4 + * indent-tabs-mode: nil + * End: + */ diff --git a/xen/arch/arm/smpboot.c b/xen/arch/arm/smpboot.c index d8eb5d3..866ed62 100644 --- a/xen/arch/arm/smpboot.c +++ b/xen/arch/arm/smpboot.c @@ -142,7 +142,7 @@ void __cpuinit start_secondary(unsigned long boot_phys_offset, set_processor_id(cpuid); /* Setup Hyp vector base */ - WRITE_CP32((register_t) hyp_traps_vector, HVBAR); + WRITE_SYSREG((vaddr_t)hyp_traps_vector, VBAR_EL2); mmu_init_secondary_cpu(); enable_vfp(); diff --git a/xen/arch/arm/traps.c b/xen/arch/arm/traps.c index cb8a8d2..d6bdaa7 100644 --- a/xen/arch/arm/traps.c +++ b/xen/arch/arm/traps.c @@ -628,7 +628,7 @@ static void do_cp15_64(struct cpu_user_regs *regs, } -void dump_guest_s1_walk(struct domain *d, uint32_t addr) +void dump_guest_s1_walk(struct domain *d, vaddr_t addr) { uint32_t ttbcr = READ_CP32(TTBCR); uint32_t ttbr0 = READ_CP32(TTBR0); @@ -636,7 +636,7 @@ void dump_guest_s1_walk(struct domain *d, uint32_t addr) uint32_t offset; uint32_t *first = NULL, *second = NULL; - printk("dom%d VA 0x%08"PRIx32"\n", d->domain_id, addr); + printk("dom%d VA 0x%08"PRIvaddr"\n", d->domain_id, addr); printk(" TTBCR: 0x%08"PRIx32"\n", ttbcr); printk(" TTBR0: 0x%08"PRIx32" = 0x%"PRIpaddr"\n", ttbr0, p2m_lookup(d, ttbr0 & PAGE_MASK)); @@ -692,7 +692,11 @@ static void do_trap_data_abort_guest(struct 
cpu_user_regs *regs, mmio_info_t info; info.dabt = dabt; +#ifdef CONFIG_ARM_32 info.gva = READ_CP32(HDFAR); +#else + info.gva = READ_SYSREG64(FAR_EL2); +#endif if (dabt.s1ptw) goto bad_data_abort; @@ -713,7 +717,7 @@ bad_data_abort: /* XXX inject a suitable fault into the guest */ printk("Guest data abort: %s%s%s\n" - " gva=%"PRIx32"\n", + " gva=%"PRIvaddr"\n", msg, dabt.s1ptw ? " S2 during S1" : "", fsc_level_str(level), info.gva); @@ -736,13 +740,17 @@ bad_data_abort: asmlinkage void do_trap_hypervisor(struct cpu_user_regs *regs) { - union hsr hsr = { .bits = READ_CP32(HSR) }; + union hsr hsr = { .bits = READ_SYSREG32(ESR_EL2) }; switch (hsr.ec) { case HSR_EC_CP15_32: + if ( ! is_pv32_domain(current->domain) ) + goto bad_trap; do_cp15_32(regs, hsr); break; case HSR_EC_CP15_64: + if ( ! is_pv32_domain(current->domain) ) + goto bad_trap; do_cp15_64(regs, hsr); break; case HSR_EC_HVC: @@ -754,6 +762,7 @@ asmlinkage void do_trap_hypervisor(struct cpu_user_regs *regs) do_trap_data_abort_guest(regs, hsr.dabt); break; default: + bad_trap: printk("Hypervisor Trap. HSR=0x%x EC=0x%x IL=%x Syndrome=%"PRIx32"\n", hsr.bits, hsr.ec, hsr.len, hsr.iss); do_unexpected_trap("Hypervisor", regs); diff --git a/xen/include/asm-arm/cpregs.h b/xen/include/asm-arm/cpregs.h index 36da12e..75b6287 100644 --- a/xen/include/asm-arm/cpregs.h +++ b/xen/include/asm-arm/cpregs.h @@ -228,6 +228,7 @@ #define CCSIDR_EL1 CCSIDR #define CLIDR_EL1 CLIDR #define CSSELR_EL1 CSSELR +#define ESR_EL2 HSR #define ID_AFR0_EL1 ID_AFR0 #define ID_DFR0_EL1 ID_DFR0 #define ID_ISAR0_EL1 ID_ISAR0 diff --git a/xen/include/asm-arm/processor.h b/xen/include/asm-arm/processor.h index bd473a8..6ab466a 100644 --- a/xen/include/asm-arm/processor.h +++ b/xen/include/asm-arm/processor.h @@ -238,7 +238,7 @@ union hsr { #endif #ifndef __ASSEMBLY__ -extern uint32_t hyp_traps_vector[8]; +extern uint32_t hyp_traps_vector[]; void panic_PAR(uint64_t par); -- 1.7.2.5
At 15:49 +0000 on 21 Feb (1361461781), Ian Campbell wrote:> On Thu, 2013-02-21 at 15:25 +0000, Ian Campbell wrote: > > On Thu, 2013-02-21 at 15:10 +0000, Tim Deegan wrote: > > > At 16:47 +0000 on 14 Feb (1360860461), Ian Campbell wrote: > > > > Signed-off-by: Ian Campbell <ian.campbell@citrix.com> > > > > --- > > > > v2: Call leave_hypervisor_tail on exit back to guest, disable interrupts while > > > > restoring state. > > > > > > You don't seem to have addressed my other comments on v1: > > > > I've got them in v3, I noted that I hadn't addressed your comment on this > > patch in the #0/46. > > Here is v3. Needs "xen: consolidate implementations of LOG() macro" > which I've just posted. > > Ian. > > 8<-------------------------------------------------- > > From 6978a03e10316ff997c91ccd6f88be110dfcffec Mon Sep 17 00:00:00 2001 > From: Ian Campbell <ian.campbell@citrix.com> > Date: Mon, 21 Jan 2013 17:33:31 +0000 > Subject: [PATCH] xen: arm: arm64 trap handling. > > Signed-off-by: Ian Campbell <ian.campbell@citrix.com>

Acked-by: Tim Deegan <tim@xen.org>
Ian Campbell
2013-Feb-21 15:58 UTC
Re: [PATCH V2 14/46] xen: arm64: barriers and wait for interrupts/events
On Thu, 2013-02-21 at 15:27 +0000, Ian Campbell wrote:> On Thu, 2013-02-21 at 15:01 +0000, Tim Deegan wrote: > > At 16:47 +0000 on 14 Feb (1360860448), Ian Campbell wrote: > > > Signed-off-by: Ian Campbell <ian.campbell@citrix.com> > > > > Acked-by: Tim Deegan <tim@xen.org> > > > > Were we also talking about having smb_ barriers equivalent to the normas > > ones, like on x86? > > Yes, I think in a F2F conversation which is why I forgot.FYI it ended up like this. I retained your Ack, hope that''s ok. 8<-------------------------------- From 117f08d439bca2798db71b9971429e32424ad092 Mon Sep 17 00:00:00 2001 From: Ian Campbell <ian.campbell@citrix.com> Date: Thu, 13 Dec 2012 13:18:07 +0000 Subject: [PATCH] xen: arm64: barriers and wait for interrupts/events Signed-off-by: Ian Campbell <ian.campbell@citrix.com> Acked-by: Tim Deegan <tim@xen.org> --- v3: - smp barriers are the same as up (which are conservative) - add dmb --- xen/include/asm-arm/arm32/system.h | 29 +++++++++++++++++++++++++++++ xen/include/asm-arm/arm64/system.h | 29 +++++++++++++++++++++++++++++ xen/include/asm-arm/system.h | 20 ++++++++------------ 3 files changed, 66 insertions(+), 12 deletions(-) create mode 100644 xen/include/asm-arm/arm32/system.h create mode 100644 xen/include/asm-arm/arm64/system.h diff --git a/xen/include/asm-arm/arm32/system.h b/xen/include/asm-arm/arm32/system.h new file mode 100644 index 0000000..91098a0 --- /dev/null +++ b/xen/include/asm-arm/arm32/system.h @@ -0,0 +1,29 @@ +/* Portions taken from Linux arch arm */ +#ifndef __ASM_ARM32_SYSTEM_H +#define __ASM_ARM32_SYSTEM_H + +#define sev() __asm__ __volatile__ ("sev" : : : "memory") +#define wfe() __asm__ __volatile__ ("wfe" : : : "memory") +#define wfi() __asm__ __volatile__ ("wfi" : : : "memory") + +#define isb() __asm__ __volatile__ ("isb" : : : "memory") +#define dsb() __asm__ __volatile__ ("dsb" : : : "memory") +#define dmb() __asm__ __volatile__ ("dmb" : : : "memory") + +#define mb() dsb() +#define rmb() dsb() +#define 
wmb() mb() + +#define smp_mb() dmb() +#define smp_rmb() dmb() +#define smp_wmb() dmb() + +#endif +/* + * Local variables: + * mode: C + * c-set-style: "BSD" + * c-basic-offset: 4 + * indent-tabs-mode: nil + * End: + */ diff --git a/xen/include/asm-arm/arm64/system.h b/xen/include/asm-arm/arm64/system.h new file mode 100644 index 0000000..b3ea4a3 --- /dev/null +++ b/xen/include/asm-arm/arm64/system.h @@ -0,0 +1,29 @@ +/* Portions taken from Linux arch arm64 */ +#ifndef __ASM_ARM64_SYSTEM_H +#define __ASM_ARM64_SYSTEM_H + +#define sev() asm volatile("sev" : : : "memory") +#define wfe() asm volatile("wfe" : : : "memory") +#define wfi() asm volatile("wfi" : : : "memory") + +#define isb() asm volatile("isb" : : : "memory") +#define dsb() asm volatile("dsb sy" : : : "memory") +#define dmb() asm volatile("dmb sy" : : : "memory") + +#define mb() dsb() +#define rmb() dsb() +#define wmb() mb() + +#define smp_mb() dmb() +#define smp_rmb() dmb() +#define smp_wmb() dmb() + +#endif +/* + * Local variables: + * mode: C + * c-set-style: "BSD" + * c-basic-offset: 4 + * indent-tabs-mode: nil + * End: + */ diff --git a/xen/include/asm-arm/system.h b/xen/include/asm-arm/system.h index 216ef1f..8b4c97a 100644 --- a/xen/include/asm-arm/system.h +++ b/xen/include/asm-arm/system.h @@ -11,18 +11,6 @@ #define xchg(ptr,x) \ ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr)))) -#define isb() __asm__ __volatile__ ("isb" : : : "memory") -#define dsb() __asm__ __volatile__ ("dsb" : : : "memory") -#define dmb() __asm__ __volatile__ ("dmb" : : : "memory") - -#define mb() dsb() -#define rmb() dsb() -#define wmb() mb() - -#define smp_mb() dmb() -#define smp_rmb() dmb() -#define smp_wmb() dmb() - /* * This is used to ensure the compiler did actually allocate the register we * asked it for some inline assembly sequences. 
Apparently we can''t trust @@ -33,6 +21,14 @@ */ #define __asmeq(x, y) ".ifnc " x "," y " ; .err ; .endif\n\t" +#if defined(CONFIG_ARM_32) +# include <asm/arm32/system.h> +#elif defined(CONFIG_ARM_64) +# include <asm/arm64/system.h> +#else +# error "unknown ARM variant" +#endif + extern void __bad_xchg(volatile void *, int); static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size) -- 1.7.2.5
Ian Campbell
2013-Feb-21 16:01 UTC
Re: [PATCH V2 23/46] xen: arm: add register_t type, native register size for the hypervisor
On Thu, 2013-02-14 at 16:47 +0000, Ian Campbell wrote:> Signed-off-by: Ian Campbell <ian.campbell@citrix.com> > Acked-by: Tim Deegan <tim@xen.org> > but: > This is mostly a matter of coding taste, so I''d like Stefano''s > ack/nack here as well.Stefano, any strong opinion?> --- > --- > xen/arch/arm/domain_build.c | 2 +- > xen/arch/arm/smpboot.c | 2 +- > xen/arch/arm/traps.c | 44 ++++++++++++++++++++++-------------------- > xen/arch/arm/vgic.c | 18 ++++++++-------- > xen/arch/arm/vpl011.c | 6 ++-- > xen/arch/arm/vtimer.c | 6 ++-- > xen/include/asm-arm/regs.h | 2 +- > xen/include/asm-arm/types.h | 4 +++ > 8 files changed, 45 insertions(+), 39 deletions(-) > > diff --git a/xen/arch/arm/domain_build.c b/xen/arch/arm/domain_build.c > index 7403f1a..30d014a 100644 > --- a/xen/arch/arm/domain_build.c > +++ b/xen/arch/arm/domain_build.c > @@ -268,7 +268,7 @@ static int prepare_dtb(struct domain *d, struct kernel_info *kinfo) > > static void dtb_load(struct kernel_info *kinfo) > { > - void * __user dtb_virt = (void *)(u32)kinfo->dtb_paddr; > + void * __user dtb_virt = (void *)(register_t)kinfo->dtb_paddr; > > raw_copy_to_guest(dtb_virt, kinfo->fdt, fdt_totalsize(kinfo->fdt)); > xfree(kinfo->fdt); > diff --git a/xen/arch/arm/smpboot.c b/xen/arch/arm/smpboot.c > index 86379b7..d8eb5d3 100644 > --- a/xen/arch/arm/smpboot.c > +++ b/xen/arch/arm/smpboot.c > @@ -142,7 +142,7 @@ void __cpuinit start_secondary(unsigned long boot_phys_offset, > set_processor_id(cpuid); > > /* Setup Hyp vector base */ > - WRITE_CP32((uint32_t) hyp_traps_vector, HVBAR); > + WRITE_CP32((register_t) hyp_traps_vector, HVBAR); > > mmu_init_secondary_cpu(); > enable_vfp(); > diff --git a/xen/arch/arm/traps.c b/xen/arch/arm/traps.c > index eaf1f52..0299b33 100644 > --- a/xen/arch/arm/traps.c > +++ b/xen/arch/arm/traps.c > @@ -68,7 +68,7 @@ static void print_xen_info(void) > debug_build() ? 
''y'' : ''n'', print_tainted(taint_str)); > } > > -uint32_t *select_user_reg(struct cpu_user_regs *regs, int reg) > +register_t *select_user_reg(struct cpu_user_regs *regs, int reg) > { > BUG_ON( !guest_mode(regs) ); > > @@ -81,20 +81,20 @@ uint32_t *select_user_reg(struct cpu_user_regs *regs, int reg) > > switch ( reg ) { > case 0 ... 7: /* Unbanked registers */ > - BUILD_BUG_ON(REGOFFS(r0) + 7*sizeof(uint32_t) != REGOFFS(r7)); > + BUILD_BUG_ON(REGOFFS(r0) + 7*sizeof(register_t) != REGOFFS(r7)); > return ®s->r0 + reg; > case 8 ... 12: /* Register banked in FIQ mode */ > - BUILD_BUG_ON(REGOFFS(r8_fiq) + 4*sizeof(uint32_t) != REGOFFS(r12_fiq)); > + BUILD_BUG_ON(REGOFFS(r8_fiq) + 4*sizeof(register_t) != REGOFFS(r12_fiq)); > if ( fiq_mode(regs) ) > return ®s->r8_fiq + reg - 8; > else > return ®s->r8 + reg - 8; > case 13 ... 14: /* Banked SP + LR registers */ > - BUILD_BUG_ON(REGOFFS(sp_fiq) + 1*sizeof(uint32_t) != REGOFFS(lr_fiq)); > - BUILD_BUG_ON(REGOFFS(sp_irq) + 1*sizeof(uint32_t) != REGOFFS(lr_irq)); > - BUILD_BUG_ON(REGOFFS(sp_svc) + 1*sizeof(uint32_t) != REGOFFS(lr_svc)); > - BUILD_BUG_ON(REGOFFS(sp_abt) + 1*sizeof(uint32_t) != REGOFFS(lr_abt)); > - BUILD_BUG_ON(REGOFFS(sp_und) + 1*sizeof(uint32_t) != REGOFFS(lr_und)); > + BUILD_BUG_ON(REGOFFS(sp_fiq) + 1*sizeof(register_t) != REGOFFS(lr_fiq)); > + BUILD_BUG_ON(REGOFFS(sp_irq) + 1*sizeof(register_t) != REGOFFS(lr_irq)); > + BUILD_BUG_ON(REGOFFS(sp_svc) + 1*sizeof(register_t) != REGOFFS(lr_svc)); > + BUILD_BUG_ON(REGOFFS(sp_abt) + 1*sizeof(register_t) != REGOFFS(lr_abt)); > + BUILD_BUG_ON(REGOFFS(sp_und) + 1*sizeof(register_t) != REGOFFS(lr_und)); > switch ( regs->cpsr & PSR_MODE_MASK ) > { > case PSR_MODE_USR: > @@ -315,11 +315,11 @@ static void show_guest_stack(struct cpu_user_regs *regs) > printk("GUEST STACK GOES HERE\n"); > } > > -#define STACK_BEFORE_EXCEPTION(regs) ((uint32_t*)(regs)->sp) > +#define STACK_BEFORE_EXCEPTION(regs) ((register_t*)(regs)->sp) > > static void show_trace(struct cpu_user_regs 
*regs) > { > - uint32_t *frame, next, addr, low, high; > + register_t *frame, next, addr, low, high; > > printk("Xen call trace:\n "); > > @@ -327,7 +327,7 @@ static void show_trace(struct cpu_user_regs *regs) > print_symbol(" %s\n ", regs->pc); > > /* Bounds for range of valid frame pointer. */ > - low = (uint32_t)(STACK_BEFORE_EXCEPTION(regs)/* - 2*/); > + low = (register_t)(STACK_BEFORE_EXCEPTION(regs)/* - 2*/); > high = (low & ~(STACK_SIZE - 1)) + > (STACK_SIZE - sizeof(struct cpu_info)); > > @@ -356,7 +356,7 @@ static void show_trace(struct cpu_user_regs *regs) > break; > { > /* Ordinary stack frame. */ > - frame = (uint32_t *)next; > + frame = (register_t *)next; > next = frame[-1]; > addr = frame[0]; > } > @@ -364,7 +364,7 @@ static void show_trace(struct cpu_user_regs *regs) > printk("[<%p>]", _p(addr)); > print_symbol(" %s\n ", addr); > > - low = (uint32_t)&frame[1]; > + low = (register_t)&frame[1]; > } > > printk("\n"); > @@ -372,7 +372,7 @@ static void show_trace(struct cpu_user_regs *regs) > > void show_stack(struct cpu_user_regs *regs) > { > - uint32_t *stack = STACK_BEFORE_EXCEPTION(regs), addr; > + register_t *stack = STACK_BEFORE_EXCEPTION(regs), addr; > int i; > > if ( guest_mode(regs) ) > @@ -486,20 +486,22 @@ static arm_hypercall_t arm_hypercall_table[] = { > > static void do_debug_trap(struct cpu_user_regs *regs, unsigned int code) > { > - uint32_t reg, *r; > + register_t *r; > + uint32_t reg; > uint32_t domid = current->domain->domain_id; > switch ( code ) { > case 0xe0 ... 
0xef: > reg = code - 0xe0; > r = select_user_reg(regs, reg); > - printk("DOM%d: R%d = %#010"PRIx32" at %#010"PRIx32"\n", > + printk("DOM%d: R%d = 0x%"PRIregister" at 0x%"PRIvaddr"\n", > domid, reg, *r, regs->pc); > break; > case 0xfd: > - printk("DOM%d: Reached %#010"PRIx32"\n", domid, regs->pc); > + printk("DOM%d: Reached %"PRIvaddr"\n", domid, regs->pc); > break; > case 0xfe: > - printk("%c", (char)(regs->r0 & 0xff)); > + r = select_user_reg(regs, 0); > + printk("%c", (char)(*r & 0xff)); > break; > case 0xff: > printk("DOM%d: DEBUG\n", domid); > @@ -561,7 +563,7 @@ static void do_cp15_32(struct cpu_user_regs *regs, > union hsr hsr) > { > struct hsr_cp32 cp32 = hsr.cp32; > - uint32_t *r = select_user_reg(regs, cp32.reg); > + uint32_t *r = (uint32_t*)select_user_reg(regs, cp32.reg); > > if ( !cp32.ccvalid ) { > dprintk(XENLOG_ERR, "cp_15(32): need to handle invalid condition codes\n"); > @@ -607,7 +609,7 @@ static void do_cp15_32(struct cpu_user_regs *regs, > BUG_ON(!vtimer_emulate(regs, hsr)); > break; > default: > - printk("%s p15, %d, r%d, cr%d, cr%d, %d @ %#08x\n", > + printk("%s p15, %d, r%d, cr%d, cr%d, %d @ 0x%"PRIregister"\n", > cp32.read ? "mrc" : "mcr", > cp32.op1, cp32.reg, cp32.crn, cp32.crm, cp32.op2, regs->pc); > panic("unhandled 32-bit CP15 access %#x\n", hsr.bits & HSR_CP32_REGS_MASK); > @@ -637,7 +639,7 @@ static void do_cp15_64(struct cpu_user_regs *regs, > BUG_ON(!vtimer_emulate(regs, hsr)); > break; > default: > - printk("%s p15, %d, r%d, r%d, cr%d @ %#08x\n", > + printk("%s p15, %d, r%d, r%d, cr%d @ 0x%"PRIregister"\n", > cp64.read ? 
"mrrc" : "mcrr", > cp64.op1, cp64.reg1, cp64.reg2, cp64.crm, regs->pc); > panic("unhandled 64-bit CP15 access %#x\n", hsr.bits & HSR_CP64_REGS_MASK); > diff --git a/xen/arch/arm/vgic.c b/xen/arch/arm/vgic.c > index 39b9775..57147d5 100644 > --- a/xen/arch/arm/vgic.c > +++ b/xen/arch/arm/vgic.c > @@ -160,7 +160,7 @@ static int vgic_distr_mmio_read(struct vcpu *v, mmio_info_t *info) > { > struct hsr_dabt dabt = info->dabt; > struct cpu_user_regs *regs = guest_cpu_user_regs(); > - uint32_t *r = select_user_reg(regs, dabt.reg); > + register_t *r = select_user_reg(regs, dabt.reg); > struct vgic_irq_rank *rank; > int offset = (int)(info->gpa - VGIC_DISTR_BASE_ADDRESS); > int gicd_reg = REG(offset); > @@ -372,7 +372,7 @@ static int vgic_distr_mmio_write(struct vcpu *v, mmio_info_t *info) > { > struct hsr_dabt dabt = info->dabt; > struct cpu_user_regs *regs = guest_cpu_user_regs(); > - uint32_t *r = select_user_reg(regs, dabt.reg); > + register_t *r = select_user_reg(regs, dabt.reg); > struct vgic_irq_rank *rank; > int offset = (int)(info->gpa - VGIC_DISTR_BASE_ADDRESS); > int gicd_reg = REG(offset); > @@ -421,13 +421,13 @@ static int vgic_distr_mmio_write(struct vcpu *v, mmio_info_t *info) > > case GICD_ISPENDR ... GICD_ISPENDRN: > if ( dabt.size != 0 && dabt.size != 2 ) goto bad_width; > - printk("vGICD: unhandled %s write %#"PRIx32" to ISPENDR%d\n", > + printk("vGICD: unhandled %s write %#"PRIregister" to ISPENDR%d\n", > dabt.size ? "word" : "byte", *r, gicd_reg - GICD_ISPENDR); > return 0; > > case GICD_ICPENDR ... GICD_ICPENDRN: > if ( dabt.size != 0 && dabt.size != 2 ) goto bad_width; > - printk("vGICD: unhandled %s write %#"PRIx32" to ICPENDR%d\n", > + printk("vGICD: unhandled %s write %#"PRIregister" to ICPENDR%d\n", > dabt.size ? 
"word" : "byte", *r, gicd_reg - GICD_ICPENDR); > return 0; > > @@ -499,19 +499,19 @@ static int vgic_distr_mmio_write(struct vcpu *v, mmio_info_t *info) > > case GICD_SGIR: > if ( dabt.size != 2 ) goto bad_width; > - printk("vGICD: unhandled write %#"PRIx32" to ICFGR%d\n", > + printk("vGICD: unhandled write %#"PRIregister" to ICFGR%d\n", > *r, gicd_reg - GICD_ICFGR); > return 0; > > case GICD_CPENDSGIR ... GICD_CPENDSGIRN: > if ( dabt.size != 0 && dabt.size != 2 ) goto bad_width; > - printk("vGICD: unhandled %s write %#"PRIx32" to ICPENDSGIR%d\n", > + printk("vGICD: unhandled %s write %#"PRIregister" to ICPENDSGIR%d\n", > dabt.size ? "word" : "byte", *r, gicd_reg - GICD_CPENDSGIR); > return 0; > > case GICD_SPENDSGIR ... GICD_SPENDSGIRN: > if ( dabt.size != 0 && dabt.size != 2 ) goto bad_width; > - printk("vGICD: unhandled %s write %#"PRIx32" to ISPENDSGIR%d\n", > + printk("vGICD: unhandled %s write %#"PRIregister" to ISPENDSGIR%d\n", > dabt.size ? "word" : "byte", *r, gicd_reg - GICD_SPENDSGIR); > return 0; > > @@ -537,13 +537,13 @@ static int vgic_distr_mmio_write(struct vcpu *v, mmio_info_t *info) > goto write_ignore; > > default: > - printk("vGICD: unhandled write r%d=%"PRIx32" offset %#08x\n", > + printk("vGICD: unhandled write r%d=%"PRIregister" offset %#08x\n", > dabt.reg, *r, offset); > return 0; > } > > bad_width: > - printk("vGICD: bad write width %d r%d=%"PRIx32" offset %#08x\n", > + printk("vGICD: bad write width %d r%d=%"PRIregister" offset %#08x\n", > dabt.size, dabt.reg, *r, offset); > domain_crash_synchronous(); > return 0; > diff --git a/xen/arch/arm/vpl011.c b/xen/arch/arm/vpl011.c > index 7dcee90..db5094e 100644 > --- a/xen/arch/arm/vpl011.c > +++ b/xen/arch/arm/vpl011.c > @@ -92,7 +92,7 @@ static int uart0_mmio_read(struct vcpu *v, mmio_info_t *info) > { > struct hsr_dabt dabt = info->dabt; > struct cpu_user_regs *regs = guest_cpu_user_regs(); > - uint32_t *r = select_user_reg(regs, dabt.reg); > + register_t *r = select_user_reg(regs, dabt.reg); 
> int offset = (int)(info->gpa - UART0_START); > > switch ( offset ) > @@ -114,7 +114,7 @@ static int uart0_mmio_write(struct vcpu *v, mmio_info_t *info) > { > struct hsr_dabt dabt = info->dabt; > struct cpu_user_regs *regs = guest_cpu_user_regs(); > - uint32_t *r = select_user_reg(regs, dabt.reg); > + register_t *r = select_user_reg(regs, dabt.reg); > int offset = (int)(info->gpa - UART0_START); > > switch ( offset ) > @@ -127,7 +127,7 @@ static int uart0_mmio_write(struct vcpu *v, mmio_info_t *info) > /* Silently ignore */ > return 1; > default: > - printk("VPL011: unhandled write r%d=%"PRIx32" offset %#08x\n", > + printk("VPL011: unhandled write r%d=%"PRIregister" offset %#08x\n", > dabt.reg, *r, offset); > domain_crash_synchronous(); > } > diff --git a/xen/arch/arm/vtimer.c b/xen/arch/arm/vtimer.c > index 85201b5..291b87e 100644 > --- a/xen/arch/arm/vtimer.c > +++ b/xen/arch/arm/vtimer.c > @@ -99,7 +99,7 @@ static int vtimer_emulate_32(struct cpu_user_regs *regs, union hsr hsr) > { > struct vcpu *v = current; > struct hsr_cp32 cp32 = hsr.cp32; > - uint32_t *r = select_user_reg(regs, cp32.reg); > + uint32_t *r = (uint32_t *)select_user_reg(regs, cp32.reg); > s_time_t now; > > switch ( hsr.bits & HSR_CP32_REGS_MASK ) > @@ -151,8 +151,8 @@ static int vtimer_emulate_64(struct cpu_user_regs *regs, union hsr hsr) > { > struct vcpu *v = current; > struct hsr_cp64 cp64 = hsr.cp64; > - uint32_t *r1 = select_user_reg(regs, cp64.reg1); > - uint32_t *r2 = select_user_reg(regs, cp64.reg2); > + uint32_t *r1 = (uint32_t *)select_user_reg(regs, cp64.reg1); > + uint32_t *r2 = (uint32_t *)select_user_reg(regs, cp64.reg2); > uint64_t ticks; > s_time_t now; > > diff --git a/xen/include/asm-arm/regs.h b/xen/include/asm-arm/regs.h > index 7486944..a723f92 100644 > --- a/xen/include/asm-arm/regs.h > +++ b/xen/include/asm-arm/regs.h > @@ -34,7 +34,7 @@ > * Returns a pointer to the given register value in regs, taking the > * processor mode (CPSR) into account. 
> */ > -extern uint32_t *select_user_reg(struct cpu_user_regs *regs, int reg); > +extern register_t *select_user_reg(struct cpu_user_regs *regs, int reg); > > #endif /* __ARM_REGS_H__ */ > /* > diff --git a/xen/include/asm-arm/types.h b/xen/include/asm-arm/types.h > index d3e16d8..9ca32f1 100644 > --- a/xen/include/asm-arm/types.h > +++ b/xen/include/asm-arm/types.h > @@ -41,6 +41,8 @@ typedef u32 vaddr_t; > typedef u64 paddr_t; > #define INVALID_PADDR (~0ULL) > #define PRIpaddr "016llx" > +typedef u32 register_t; > +#define PRIregister "x" > #elif defined (CONFIG_ARM_64) > typedef signed long s64; > typedef unsigned long u64; > @@ -49,6 +51,8 @@ typedef u64 vaddr_t; > typedef u64 paddr_t; > #define INVALID_PADDR (~0UL) > #define PRIpaddr "016lx" > +typedef u64 register_t; > +#define PRIregister "lx" > #endif > > typedef unsigned long size_t; > -- > 1.7.2.5 >
On Thu, 2013-02-21 at 15:36 +0000, Tim Deegan wrote:> At 15:25 +0000 on 21 Feb (1361460324), Ian Campbell wrote: > > On Thu, 2013-02-21 at 15:10 +0000, Tim Deegan wrote: > > > At 16:47 +0000 on 14 Feb (1360860461), Ian Campbell wrote: > > > > Signed-off-by: Ian Campbell <ian.campbell@citrix.com> > > > > --- > > > > v2: Call leave_hypervisor_tail on exit back to guest, disable interrupts while > > > > restoring state. > > > > > > You don't seem to have addressed my other comments on v1: > > > > I've got them in v3, I noted that I hadn't addressed your comment on this > > patch in the #0/46. > > So you did; I did read the 0/46, but for some reason all that stuck in > my head was the WFE stuff. > > AFAICS you just need to re-roll this and #25,

I don't think you mean #25? That is "xen: arm64: add guest type to domain field." which you've acked. I had an outstanding comment for #14 "xen: arm64: barriers and wait for interrupts/events" which I've just addressed (and reposted)

> and get a tools-person to ack #20.

Stefano perhaps? ;-)

> So for v3, can you just send those, and avoid another 46-patch > mailbomb? :)

When I'm applying my own patches I prefer to do it from the list rather than short cutting them from my own tree, keep me honest/from making mistakes. How about I include an index of acked/unacked patches in the zeroeth mail? You ought to be able to just mark it all as read. Ian.
Ian Campbell
2013-Feb-21 16:03 UTC
Re: [PATCH V2 05/46] xen: arm64: initial build + config changes, start of day code
On Thu, 2013-02-21 at 15:26 +0000, Ian Campbell wrote:> On Thu, 2013-02-21 at 14:56 +0000, Tim Deegan wrote: > > At 16:47 +0000 on 14 Feb (1360860439), Ian Campbell wrote: > > > +2: PRINT("- Started in Hyp mode -\r\n") > > > + > > > +hyp: > > > > I though we were going to use "EL3" instead of "Hyp". > > Sorry, looks like I missed a few comments when I went through this one.s/EL3/EL2/g This is what I have now: 8<----------------------------------------- From e4587a06df0d04ccbfd04ec7cc371900fe7dabf4 Mon Sep 17 00:00:00 2001 From: Ian Campbell <ian.campbell@citrix.com> Date: Mon, 10 Dec 2012 14:19:00 +0000 Subject: [PATCH] xen: arm64: initial build + config changes, start of day code Signed-off-by: Ian Campbell <ian.campbell@citrix.com> --- v3: - s/hyp/el2/ - remove dead code - fix comment formatting v2: - Add PSR_MODE definitions for 64-bit to arch-arm.h and use instead of defining in head.S - Nuke hard tabs in head.S and mode_switch.S with expand(1) --- Config.mk | 2 +- config/arm64.mk | 12 ++ xen/arch/arm/Makefile | 1 + xen/arch/arm/Rules.mk | 6 + xen/arch/arm/arm64/Makefile | 1 + xen/arch/arm/arm64/head.S | 393 ++++++++++++++++++++++++++++++++++++++ xen/arch/arm/arm64/mode_switch.S | 83 ++++++++ xen/arch/arm/xen.lds.S | 8 +- xen/include/asm-arm/page.h | 1 + xen/include/public/arch-arm.h | 14 ++ xen/include/public/hvm/save.h | 2 +- xen/include/public/xen.h | 2 +- xen/include/xen/libelf.h | 2 +- 13 files changed, 522 insertions(+), 5 deletions(-) create mode 100644 config/arm64.mk create mode 100644 xen/arch/arm/arm64/Makefile create mode 100644 xen/arch/arm/arm64/head.S create mode 100644 xen/arch/arm/arm64/mode_switch.S diff --git a/Config.mk b/Config.mk index 64541c8..ea64925 100644 --- a/Config.mk +++ b/Config.mk @@ -15,7 +15,7 @@ debug_symbols ?= $(debug) XEN_COMPILE_ARCH ?= $(shell uname -m | sed -e s/i.86/x86_32/ \ -e s/i86pc/x86_32/ -e s/amd64/x86_64/ \ - -e s/armv7.*/arm32/) + -e s/armv7.*/arm32/ -e s/armv8.*/arm64/) XEN_TARGET_ARCH ?= $(XEN_COMPILE_ARCH) 
XEN_OS ?= $(shell uname -s) diff --git a/config/arm64.mk b/config/arm64.mk new file mode 100644 index 0000000..b2457eb --- /dev/null +++ b/config/arm64.mk @@ -0,0 +1,12 @@ +CONFIG_ARM := y +CONFIG_ARM_64 := y +CONFIG_ARM_$(XEN_OS) := y + +CFLAGS += #-marm -march= -mcpu= etc + +HAS_PL011 := y + +# Use only if calling $(LD) directly. +LDFLAGS_DIRECT += -maarch64elf + +CONFIG_LOAD_ADDRESS ?= 0x80000000 diff --git a/xen/arch/arm/Makefile b/xen/arch/arm/Makefile index f2822f2..7ff67c7 100644 --- a/xen/arch/arm/Makefile +++ b/xen/arch/arm/Makefile @@ -1,4 +1,5 @@ subdir-$(arm32) += arm32 +subdir-$(arm64) += arm64 obj-y += early_printk.o obj-y += domain.o diff --git a/xen/arch/arm/Rules.mk b/xen/arch/arm/Rules.mk index 5b5768a..29b605d 100644 --- a/xen/arch/arm/Rules.mk +++ b/xen/arch/arm/Rules.mk @@ -26,6 +26,12 @@ arm32 := y arm64 := n endif +ifeq ($(TARGET_SUBARCH),arm64) +CFLAGS += -mcpu=generic +arm32 := n +arm64 := y +endif + ifneq ($(call cc-option,$(CC),-fvisibility=hidden,n),n) CFLAGS += -DGCC_HAS_VISIBILITY_ATTRIBUTE endif diff --git a/xen/arch/arm/arm64/Makefile b/xen/arch/arm/arm64/Makefile new file mode 100644 index 0000000..dffbeb1 --- /dev/null +++ b/xen/arch/arm/arm64/Makefile @@ -0,0 +1 @@ +obj-y += mode_switch.o diff --git a/xen/arch/arm/arm64/head.S b/xen/arch/arm/arm64/head.S new file mode 100644 index 0000000..b7ab251 --- /dev/null +++ b/xen/arch/arm/arm64/head.S @@ -0,0 +1,393 @@ +/* + * xen/arch/arm/head.S + * + * Start-of-day code for an ARMv8. + * + * Ian Campbell <ian.campbell@citrix.com> + * Copyright (c) 2012 Citrix Systems. + * + * Based on ARMv7-A head.S by + * Tim Deegan <tim@xen.org> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include <asm/config.h> +#include <asm/page.h> +#include <asm/asm_defns.h> + +#define PT_PT 0xe7f /* nG=1 AF=1 SH=10 AP=01 NS=1 ATTR=111 T=1 P=1 */ +#define PT_MEM 0xe7d /* nG=1 AF=1 SH=10 AP=01 NS=1 ATTR=111 T=0 P=1 */ +#define PT_DEV 0xe71 /* nG=1 AF=1 SH=10 AP=01 NS=1 ATTR=100 T=0 P=1 */ +#define PT_DEV_L3 0xe73 /* nG=1 AF=1 SH=10 AP=01 NS=1 ATTR=100 T=1 P=1 */ + +/* Macro to print a string to the UART, if there is one. + * Clobbers r0-r3. */ +#ifdef EARLY_UART_ADDRESS +#define PRINT(_s) \ + adr x0, 98f ; \ + bl puts ; \ + b 99f ; \ +98: .asciz _s ; \ + .align 2 ; \ +99: +#else +#define PRINT(s) +#endif + + /*.aarch64*/ + + /* + * Kernel startup entry point. + * --------------------------- + * + * The requirements are: + * MMU = off, D-cache = off, I-cache = on or off, + * x0 = physical address to the FDT blob. + * + * This must be the very first address in the loaded image. + * It should be linked at XEN_VIRT_START, and loaded at any + * 2MB-aligned address. All of text+data+bss must fit in 2MB, + * or the initial pagetable code below will need adjustment. + */ + + .global start +start: + /* + * DO NOT MODIFY. Image header expected by Linux boot-loaders. + */ + b real_start /* branch to kernel start, magic */ + .long 0 /* reserved */ + .quad 0 /* Image load offset from start of RAM */ + .quad 0 /* reserved */ + .quad 0 /* reserved */ + +real_start: + msr DAIFSet, 0xf /* Disable all interrupts */ + + /* Save the bootloader arguments in less-clobberable registers */ + mov x21, x0 /* x21 := DTB, physical address */ + + /* Find out where we are */ + ldr x0, =start + adr x19, start /* x19 := paddr (start) */ + sub x20, x19, x0 /* x20 := phys-offset */ + + /* Using the DTB in the .dtb section? 
*/ +#ifdef CONFIG_DTB_FILE + ldr x21, =_sdtb + add x21, x21, x20 /* x21 := paddr(DTB) */ +#endif + + /* Are we the boot CPU? */ + mov x22, #0 /* x22 := CPU ID */ + mrs x0, mpidr_el1 + tbz x0, 31, boot_cpu /* Multiprocessor extension supported? */ + tbnz x0, 30, boot_cpu /* Uniprocessor system? */ + + mov x13, #(0xff << 24) + bics x22, x0, x13 /* Mask out flags to get CPU ID */ + b.eq boot_cpu /* If we''re CPU 0, boot now */ + + /* Non-boot CPUs wait here to be woken up one at a time. */ +1: dsb sy + ldr x0, =smp_up_cpu /* VA of gate */ + add x0, x0, x20 /* PA of gate */ + ldr x1, [x0] /* Which CPU is being booted? */ + cmp x1, x22 /* Is it us? */ + b.eq 2f + wfe + b 1b +2: + +boot_cpu: +#ifdef EARLY_UART_ADDRESS + ldr x23, =EARLY_UART_ADDRESS /* x23 := UART base address */ + cbnz x22, 1f + bl init_uart /* CPU 0 sets up the UART too */ +1: PRINT("- CPU ") + mov x0, x22 + bl putn + PRINT(" booting -\r\n") +#endif + + PRINT("- Current EL ") + mrs x0, CurrentEL + bl putn + PRINT(" -\r\n") + + /* Are we in EL3 */ + mrs x0, CurrentEL + cmp x0, #PSR_MODE_EL3t + ccmp x0, #PSR_MODE_EL3h, #0x4, ne + b.eq 1f /* Yes */ + + /* Are we in EL2 */ + cmp x0, #PSR_MODE_EL2t + ccmp x0, #PSR_MODE_EL2h, #0x4, ne + b.eq 2f /* Yes */ + + /* Otherwise, it must have been EL0 or EL1 */ + PRINT("- CPU is not in EL3 or EL2 -\r\n") + b fail + +1: PRINT("- Started in EL3 -\r\n- Entering EL2 -\r\n") + ldr x1, =enter_el2_mode /* VA of function */ + add x1, x1, x20 /* PA of function */ + adr x30, el2 /* Set return address for call */ + br x1 /* Call function */ + +2: PRINT("- Started in EL2 mode -\r\n") + +el2: + /* Zero BSS On the boot CPU to avoid nasty surprises */ + cbnz x22, skip_bss + + PRINT("- Zero BSS -\r\n") + ldr x0, =__bss_start /* Load start & end of bss */ + ldr x1, =__bss_end + add x0, x0, x20 /* Apply physical offset */ + add x1, x1, x20 + +1: str xzr, [x0], #8 + cmp x0, x1 + b.lo 1b + +skip_bss: + + PRINT("- Setting up control registers -\r\n") + + /* Set up memory attribute type 
tables */ + ldr x0, =MAIRVAL + msr mair_el2, x0 + + /* Set up the HTCR: + * PASize -- 4G + * Top byte is used + * PT walks use Outer-Shareable accesses, + * PT walks are write-back, no-write-allocate in both cache levels, + * Full 64-bit address space goes through this table. */ + ldr x0, =0x80802500 + msr tcr_el2, x0 + + /* Set up the HSCTLR: + * Exceptions in LE ARM, + * Low-latency IRQs disabled, + * Write-implies-XN disabled (for now), + * D-cache disabled (for now), + * I-cache enabled, + * Alignment checking enabled, + * MMU translation disabled (for now). */ + ldr x0, =(HSCTLR_BASE|SCTLR_A) + msr SCTLR_EL2, x0 + + /* Write Xen''s PT''s paddr into the HTTBR */ + ldr x4, =xen_pgtable + add x4, x4, x20 /* x4 := paddr (xen_pagetable) */ + msr TTBR0_EL2, x4 + + /* Non-boot CPUs don''t need to rebuild the pagetable */ + cbnz x22, pt_ready + + ldr x1, =xen_first + add x1, x1, x20 /* x1 := paddr (xen_first) */ + mov x3, #PT_PT /* x2 := table map of xen_first */ + orr x2, x1, x3 /* (+ rights for linear PT) */ + str x2, [x4, #0] /* Map it in slot 0 */ + + mov x4, x1 /* Next level into xen_first */ + + /* console fixmap */ +#ifdef EARLY_UART_ADDRESS + ldr x1, =xen_fixmap + add x1, x1, x20 /* x1 := paddr (xen_fixmap) */ + lsr x2, x23, #12 + lsl x2, x2, #12 /* 4K aligned paddr of UART */ + mov x3, #PT_DEV_L3 + orr x2, x2, x3 /* x2 := 4K dev map including UART */ + str x2, [x1, #(FIXMAP_CONSOLE*8)] /* Map it in the first fixmap''s slot */ +#endif + + /* Build the baseline idle pagetable''s first-level entries */ + ldr x1, =xen_second + add x1, x1, x20 /* x1 := paddr (xen_second) */ + mov x3, #PT_PT /* x2 := table map of xen_second */ + orr x2, x1, x3 /* (+ rights for linear PT) */ + str x2, [x4, #0] /* Map it in slot 0 */ + add x2, x2, #0x1000 + str x2, [x4, #8] /* Map 2nd page in slot 1 */ + add x2, x2, #0x1000 + str x2, [x4, #16] /* Map 3rd page in slot 2 */ + add x2, x2, #0x1000 + str x2, [x4, #24] /* Map 4th page in slot 3 */ + + /* Now set up the second-level entries 
*/ + mov x3, #PT_MEM + orr x2, x19, x3 /* x2 := 2MB normal map of Xen */ + orr x4, xzr, x19, lsr #18 + str x2, [x1, x4] /* Map Xen there */ + ldr x4, =start + lsr x4, x4, #18 /* Slot for vaddr(start) */ + str x2, [x1, x4] /* Map Xen there too */ + + /* xen_fixmap pagetable */ + ldr x2, =xen_fixmap + add x2, x2, x20 /* x2 := paddr (xen_fixmap) */ + mov x3, #PT_PT + orr x2, x2, x3 /* x2 := table map of xen_fixmap */ + add x4, x4, #8 + str x2, [x1, x4] /* Map it in the fixmap''s slot */ + + lsr x2, x21, #21 + lsl x2, x2, #21 /* 2MB-aligned paddr of DTB */ + mov x3, #PT_MEM /* x2 := 2MB RAM incl. DTB */ + orr x2, x2, x3 + add x4, x4, #8 + str x2, [x1, x4] /* Map it in the early boot slot */ + +pt_ready: + PRINT("- Turning on paging -\r\n") + + ldr x1, =paging /* Explicit vaddr, not RIP-relative */ + mrs x0, SCTLR_EL2 + orr x0, x0, #SCTLR_M /* Enable MMU */ + orr x0, x0, #SCTLR_C /* Enable D-cache */ + dsb sy /* Flush PTE writes and finish reads */ + msr SCTLR_EL2, x0 /* now paging is enabled */ + isb /* Now, flush the icache */ + br x1 /* Get a proper vaddr into PC */ +paging: + +#ifdef EARLY_UART_ADDRESS + /* Use a virtual address to access the UART. 
*/ + ldr x23, =FIXMAP_ADDR(FIXMAP_CONSOLE) +#endif + + PRINT("- Ready -\r\n") + + /* The boot CPU should go straight into C now */ + cbz x22, launch + + /* Non-boot CPUs need to move on to the relocated pagetables */ + ldr x4, =boot_ttbr /* VA of TTBR0_EL2 stashed by CPU 0 */ + add x4, x4, x20 /* PA of it */ + ldr x4, [x4] /* Actual value */ + dsb sy + msr TTBR0_EL2, x4 + dsb sy + isb + tlbi alle2 + dsb sy /* Ensure completion of TLB flush */ + isb + + /* Non-boot CPUs report that they''ve got this far */ + ldr x0, =ready_cpus +1: ldaxr x1, [x0] /* { read # of ready CPUs } */ + add x1, x1, #1 /* Atomically { ++ } */ + stlxr w2, x1, [x0] /* { writeback } */ + cbnz w2, 1b + dsb sy + dc cvac, x0 /* Flush D-Cache */ + dsb sy + + /* Here, the non-boot CPUs must wait again -- they''re now running on + * the boot CPU''s pagetables so it''s safe for the boot CPU to + * overwrite the non-relocated copy of Xen. Once it''s done that, + * and brought up the memory allocator, non-boot CPUs can get their + * own stacks and enter C. */ +1: wfe + dsb sy + ldr x0, =smp_up_cpu + ldr x1, [x0] /* Which CPU is being booted? */ + cmp x1, x12 /* Is it us? */ + b.ne 1b + +launch: + ldr x0, =init_stack /* Find the boot-time stack */ + ldr x0, [x0] + add x0, x0, #STACK_SIZE /* (which grows down from the top). */ + sub x0, x0, #CPUINFO_sizeof /* Make room for CPU save record */ + mov sp, x0 + + mov x0, x20 /* Marshal args: - phys_offset */ + mov x1, x21 /* - FDT */ + mov x2, x22 /* - CPU ID */ + cbz x22, start_xen /* and disappear into the land of C */ + b start_secondary /* (to the appropriate entry point) */ + +/* Fail-stop + * r0: string explaining why */ +fail: PRINT("- Boot failed -\r\n") +1: wfe + b 1b + +#ifdef EARLY_UART_ADDRESS + +/* Bring up the UART. Specific to the PL011 UART. 
+ * Clobbers r0-r2 */ +init_uart: + mov x1, #0x0 + strh w1, [x23, #0x24] /* -> UARTIBRD (Baud divisor fraction) */ + mov x1, #0x4 /* 7.3728MHz / 0x4 == 16 * 115200 */ + strh w1, [x23, #0x24] /* -> UARTIBRD (Baud divisor integer) */ + mov x1, #0x60 /* 8n1 */ + strh w1, [x23, #0x24] /* -> UARTLCR_H (Line control) */ + ldr x1, =0x00000301 /* RXE | TXE | UARTEN */ + strh w1, [x23, #0x30] /* -> UARTCR (Control Register) */ + adr x0, 1f + b puts +1: .asciz "- UART enabled -\r\n" + .align 4 + +/* Print early debug messages. Specific to the PL011 UART. + * r0: Nul-terminated string to print. + * Clobbers r0-r2 */ +puts: + ldrh w2, [x23, #0x18] /* <- UARTFR (Flag register) */ + tst w2, #0x8 /* Check BUSY bit */ + b.ne puts /* Wait for the UART to be ready */ + ldrb w2, [x0], #1 /* Load next char */ + cbz w2, 1f /* Exit on nul */ + str w2, [x23] /* -> UARTDR (Data Register) */ + b puts +1: + ret + +/* Print a 32-bit number in hex. Specific to the PL011 UART. + * r0: Number to print. + * clobbers r0-r3 */ +putn: + adr x1, hex + mov x3, #8 +1: ldrh w2, [x23, #0x18] /* <- UARTFR (Flag register) */ + tst w2, #0x8 /* Check BUSY bit */ + b.ne 1b /* Wait for the UART to be ready */ + and x2, x0, #0xf0000000 /* Mask off the top nybble */ + lsr x2, x2, #28 + ldrb w2, [x1, x2] /* Convert to a char */ + strb w2, [x23] /* -> UARTDR (Data Register) */ + lsl x0, x0, #4 /* Roll it through one nybble at a time */ + subs x3, x3, #1 + b.ne 1b + ret + +hex: .ascii "0123456789abcdef" + .align 2 + +#else /* EARLY_UART_ADDRESS */ + +init_uart: +.global early_puts +early_puts: +puts: +putn: mov pc, lr + +#endif /* EARLY_UART_ADDRESS */ diff --git a/xen/arch/arm/arm64/mode_switch.S b/xen/arch/arm/arm64/mode_switch.S new file mode 100644 index 0000000..4c38181 --- /dev/null +++ b/xen/arch/arm/arm64/mode_switch.S @@ -0,0 +1,83 @@ +/* + * xen/arch/arm/arm64/mode_switch.S + * + * Start-of day code to take a CPU from EL3 to EL2. Largely taken from + * bootwrapper. 
+ * + * Ian Campbell <ian.campbell@citrix.com> + * Copyright (c) 2012 Citrix Systems. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include <asm/config.h> +#include <asm/page.h> +#include <asm/asm_defns.h> + +/* Get up a CPU into EL2. Clobbers x0-x3. + * + * Expects x22 == CPU number + * Expects x30 == EL2 entry point + * + * This code is specific to the VE model, and not intended to be used + * on production systems. As such it''s a bit hackier than the main + * boot code in head.S. In future it will be replaced by better + * integration with the bootloader/firmware so that Xen always starts + * at EL2. + */ + +.globl enter_el2_mode +enter_el2_mode: + mov x0, #0x30 // RES1 + orr x0, x0, #(1 << 0) // Non-secure EL1 + orr x0, x0, #(1 << 8) // HVC enable + orr x0, x0, #(1 << 10) // 64-bit EL2 + msr scr_el3, x0 + + msr cptr_el3, xzr // Disable copro. traps to EL3 + + ldr x0, =0x01800000 // 24Mhz + msr cntfrq_el0, x0 + + /* + * Check for the primary CPU to avoid a race on the distributor + * registers. 
+ */ + cbnz x22, 1f + + ldr x1, =(GIC_BASE_ADDRESS+GIC_DR_OFFSET) // GICD_CTLR + mov w0, #3 // EnableGrp0 | EnableGrp1 + str w0, [x1] + +1: ldr x1, =(GIC_BASE_ADDRESS+GIC_DR_OFFSET+0x80) // GICD_IGROUPR + mov w0, #~0 // Grp1 interrupts + str w0, [x1], #4 + b.ne 2f // Only local interrupts for secondary CPUs + str w0, [x1], #4 + str w0, [x1], #4 + +2: ldr x1, =(GIC_BASE_ADDRESS+GIC_CR_OFFSET) // GICC_CTLR + ldr w0, [x1] + mov w0, #3 // EnableGrp0 | EnableGrp1 + str w0, [x1] + + mov w0, #1 << 7 // allow NS access to GICC_PMR + str w0, [x1, #4] // GICC_PMR + + msr sctlr_el2, xzr + + /* + * Prepare the switch to the EL2_SP1 mode from EL3 + */ + msr elr_el3, x30 // Return to desired function + mov x1, #0x3c9 // EL2_SP1 | D | A | I | F + msr spsr_el3, x1 + eret diff --git a/xen/arch/arm/xen.lds.S b/xen/arch/arm/xen.lds.S index 410d7db..b1f0a78 100644 --- a/xen/arch/arm/xen.lds.S +++ b/xen/arch/arm/xen.lds.S @@ -11,7 +11,13 @@ ENTRY(start) -OUTPUT_ARCH(arm) +#if defined(__arm__) +#define FORMAT arm +#elif defined(__aarch64__) +#define FORMAT aarch64 +#endif + +OUTPUT_ARCH(FORMAT) PHDRS { diff --git a/xen/include/asm-arm/page.h b/xen/include/asm-arm/page.h index 9acd0af..e0a636f 100644 --- a/xen/include/asm-arm/page.h +++ b/xen/include/asm-arm/page.h @@ -38,6 +38,7 @@ */ #define MAIR0VAL 0xeeaa4400 #define MAIR1VAL 0xff000004 +#define MAIRVAL (MAIR0VAL|MAIR1VAL<<32) /* * Attribute Indexes. 
diff --git a/xen/include/public/arch-arm.h b/xen/include/public/arch-arm.h index 8dd9062..dc12524 100644 --- a/xen/include/public/arch-arm.h +++ b/xen/include/public/arch-arm.h @@ -174,6 +174,8 @@ typedef uint64_t xen_callback_t; /* 0-4: Mode */ #define PSR_MODE_MASK 0x1f + +/* 32 bit modes */ #define PSR_MODE_USR 0x10 #define PSR_MODE_FIQ 0x11 #define PSR_MODE_IRQ 0x12 @@ -184,6 +186,18 @@ typedef uint64_t xen_callback_t; #define PSR_MODE_UND 0x1b #define PSR_MODE_SYS 0x1f +/* 64 bit modes */ +#ifdef CONFIG_ARM_64 +#define PSR_MODE_BIT 0x10 /* Set iff AArch32 */ +#define PSR_MODE_EL3h 0x0d +#define PSR_MODE_EL3t 0x0c +#define PSR_MODE_EL2h 0x09 +#define PSR_MODE_EL2t 0x08 +#define PSR_MODE_EL1h 0x05 +#define PSR_MODE_EL1t 0x04 +#define PSR_MODE_EL0t 0x00 +#endif + #define PSR_THUMB (1<<5) /* Thumb Mode enable */ #define PSR_FIQ_MASK (1<<6) /* Fast Interrupt mask */ #define PSR_IRQ_MASK (1<<7) /* Interrupt mask */ diff --git a/xen/include/public/hvm/save.h b/xen/include/public/hvm/save.h index 5538d8e..cc8b5fd 100644 --- a/xen/include/public/hvm/save.h +++ b/xen/include/public/hvm/save.h @@ -102,7 +102,7 @@ DECLARE_HVM_SAVE_TYPE(END, 0, struct hvm_save_end); #if defined(__i386__) || defined(__x86_64__) #include "../arch-x86/hvm/save.h" -#elif defined(__arm__) +#elif defined(__arm__) || defined(__aarch64__) #include "../arch-arm/hvm/save.h" #else #error "unsupported architecture" diff --git a/xen/include/public/xen.h b/xen/include/public/xen.h index 846f446..a1927c0 100644 --- a/xen/include/public/xen.h +++ b/xen/include/public/xen.h @@ -31,7 +31,7 @@ #if defined(__i386__) || defined(__x86_64__) #include "arch-x86/xen.h" -#elif defined(__arm__) +#elif defined(__arm__) || defined (__aarch64__) #include "arch-arm.h" #else #error "Unsupported architecture" diff --git a/xen/include/xen/libelf.h b/xen/include/xen/libelf.h index e8f6508..218bb18 100644 --- a/xen/include/xen/libelf.h +++ b/xen/include/xen/libelf.h @@ -23,7 +23,7 @@ #ifndef __XEN_LIBELF_H__ #define 
__XEN_LIBELF_H__ -#if defined(__i386__) || defined(__x86_64__) || defined(__arm__) +#if defined(__i386__) || defined(__x86_64__) || defined(__arm__) || defined(__aarch64__) #define XEN_ELF_LITTLE_ENDIAN #else #error define architectural endianness -- 1.7.2.5
At 16:02 +0000 on 21 Feb (1361462564), Ian Campbell wrote:
> On Thu, 2013-02-21 at 15:36 +0000, Tim Deegan wrote:
> > At 15:25 +0000 on 21 Feb (1361460324), Ian Campbell wrote:
> > > On Thu, 2013-02-21 at 15:10 +0000, Tim Deegan wrote:
> > > > At 16:47 +0000 on 14 Feb (1360860461), Ian Campbell wrote:
> > > > > Signed-off-by: Ian Campbell <ian.campbell@citrix.com>
> > > > > ---
> > > > > v2: Call leave_hypervisor_tail on exit back to guest, disable interrupts while
> > > > >     restoring state.
> > > >
> > > > You don't seem to have addressed my other comments on v1:
> > >
> > > I've got them in v3, I noted that I hadn't addressed your comment on this
> > > patch in the #0/46.
> >
> > So you did; I did read the 0/46, but for some reason all that stuck in
> > my head was the WFE stuff.
> >
> > AFAICS you just need to re-roll this and #25,
>
> I don't think you mean #25? That is "xen: arm64: add guest type to
> domain field." which you've acked.

Sorry, I meant #5.

> When I'm applying my own patches I prefer to do it from the list rather
> than short cutting them from my own tree, keep me honest/from making
> mistakes. How about I include an index of acked/unacked patches in the
> zeroeth mail? You ought to be able to just mark it all as read.

Fair enough -- no need to index them; I just keep all previous versions
of a series around so I can easily find the comments.  But this won't
need a v4, will it? :)

Tim.
Tim Deegan
2013-Feb-21 16:08 UTC
Re: [PATCH V2 05/46] xen: arm64: initial build + config changes, start of day code
At 16:03 +0000 on 21 Feb (1361462587), Ian Campbell wrote:> On Thu, 2013-02-21 at 15:26 +0000, Ian Campbell wrote: > > On Thu, 2013-02-21 at 14:56 +0000, Tim Deegan wrote: > > > At 16:47 +0000 on 14 Feb (1360860439), Ian Campbell wrote: > > > > +2: PRINT("- Started in Hyp mode -\r\n") > > > > + > > > > +hyp: > > > > > > I though we were going to use "EL3" instead of "Hyp". > > > > Sorry, looks like I missed a few comments when I went through this one. > > s/EL3/EL2/g > > This is what I have now: > > 8<----------------------------------------- > > From e4587a06df0d04ccbfd04ec7cc371900fe7dabf4 Mon Sep 17 00:00:00 2001 > From: Ian Campbell <ian.campbell@citrix.com> > Date: Mon, 10 Dec 2012 14:19:00 +0000 > Subject: [PATCH] xen: arm64: initial build + config changes, start of day code > > Signed-off-by: Ian Campbell <ian.campbell@citrix.com>Acked-by: Tim Deegan <tim@xen.org>> --- > v3: - s/hyp/el2/ > - remove dead code > - fix comment formatting > v2: - Add PSR_MODE definitions for 64-bit to arch-arm.h and use instead of > defining in head.S > - Nuke hard tabs in head.S and mode_switch.S with expand(1) > --- > Config.mk | 2 +- > config/arm64.mk | 12 ++ > xen/arch/arm/Makefile | 1 + > xen/arch/arm/Rules.mk | 6 + > xen/arch/arm/arm64/Makefile | 1 + > xen/arch/arm/arm64/head.S | 393 ++++++++++++++++++++++++++++++++++++++ > xen/arch/arm/arm64/mode_switch.S | 83 ++++++++ > xen/arch/arm/xen.lds.S | 8 +- > xen/include/asm-arm/page.h | 1 + > xen/include/public/arch-arm.h | 14 ++ > xen/include/public/hvm/save.h | 2 +- > xen/include/public/xen.h | 2 +- > xen/include/xen/libelf.h | 2 +- > 13 files changed, 522 insertions(+), 5 deletions(-) > create mode 100644 config/arm64.mk > create mode 100644 xen/arch/arm/arm64/Makefile > create mode 100644 xen/arch/arm/arm64/head.S > create mode 100644 xen/arch/arm/arm64/mode_switch.S > > diff --git a/Config.mk b/Config.mk > index 64541c8..ea64925 100644 > --- a/Config.mk > +++ b/Config.mk > @@ -15,7 +15,7 @@ debug_symbols ?= $(debug) > > 
XEN_COMPILE_ARCH ?= $(shell uname -m | sed -e s/i.86/x86_32/ \ > -e s/i86pc/x86_32/ -e s/amd64/x86_64/ \ > - -e s/armv7.*/arm32/) > + -e s/armv7.*/arm32/ -e s/armv8.*/arm64/) > > XEN_TARGET_ARCH ?= $(XEN_COMPILE_ARCH) > XEN_OS ?= $(shell uname -s) > diff --git a/config/arm64.mk b/config/arm64.mk > new file mode 100644 > index 0000000..b2457eb > --- /dev/null > +++ b/config/arm64.mk > @@ -0,0 +1,12 @@ > +CONFIG_ARM := y > +CONFIG_ARM_64 := y > +CONFIG_ARM_$(XEN_OS) := y > + > +CFLAGS += #-marm -march= -mcpu= etc > + > +HAS_PL011 := y > + > +# Use only if calling $(LD) directly. > +LDFLAGS_DIRECT += -maarch64elf > + > +CONFIG_LOAD_ADDRESS ?= 0x80000000 > diff --git a/xen/arch/arm/Makefile b/xen/arch/arm/Makefile > index f2822f2..7ff67c7 100644 > --- a/xen/arch/arm/Makefile > +++ b/xen/arch/arm/Makefile > @@ -1,4 +1,5 @@ > subdir-$(arm32) += arm32 > +subdir-$(arm64) += arm64 > > obj-y += early_printk.o > obj-y += domain.o > diff --git a/xen/arch/arm/Rules.mk b/xen/arch/arm/Rules.mk > index 5b5768a..29b605d 100644 > --- a/xen/arch/arm/Rules.mk > +++ b/xen/arch/arm/Rules.mk > @@ -26,6 +26,12 @@ arm32 := y > arm64 := n > endif > > +ifeq ($(TARGET_SUBARCH),arm64) > +CFLAGS += -mcpu=generic > +arm32 := n > +arm64 := y > +endif > + > ifneq ($(call cc-option,$(CC),-fvisibility=hidden,n),n) > CFLAGS += -DGCC_HAS_VISIBILITY_ATTRIBUTE > endif > diff --git a/xen/arch/arm/arm64/Makefile b/xen/arch/arm/arm64/Makefile > new file mode 100644 > index 0000000..dffbeb1 > --- /dev/null > +++ b/xen/arch/arm/arm64/Makefile > @@ -0,0 +1 @@ > +obj-y += mode_switch.o > diff --git a/xen/arch/arm/arm64/head.S b/xen/arch/arm/arm64/head.S > new file mode 100644 > index 0000000..b7ab251 > --- /dev/null > +++ b/xen/arch/arm/arm64/head.S > @@ -0,0 +1,393 @@ > +/* > + * xen/arch/arm/head.S > + * > + * Start-of-day code for an ARMv8. > + * > + * Ian Campbell <ian.campbell@citrix.com> > + * Copyright (c) 2012 Citrix Systems. 
> + * > + * Based on ARMv7-A head.S by > + * Tim Deegan <tim@xen.org> > + * > + * This program is free software; you can redistribute it and/or modify > + * it under the terms of the GNU General Public License as published by > + * the Free Software Foundation; either version 2 of the License, or > + * (at your option) any later version. > + * > + * This program is distributed in the hope that it will be useful, > + * but WITHOUT ANY WARRANTY; without even the implied warranty of > + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the > + * GNU General Public License for more details. > + */ > + > +#include <asm/config.h> > +#include <asm/page.h> > +#include <asm/asm_defns.h> > + > +#define PT_PT 0xe7f /* nG=1 AF=1 SH=10 AP=01 NS=1 ATTR=111 T=1 P=1 */ > +#define PT_MEM 0xe7d /* nG=1 AF=1 SH=10 AP=01 NS=1 ATTR=111 T=0 P=1 */ > +#define PT_DEV 0xe71 /* nG=1 AF=1 SH=10 AP=01 NS=1 ATTR=100 T=0 P=1 */ > +#define PT_DEV_L3 0xe73 /* nG=1 AF=1 SH=10 AP=01 NS=1 ATTR=100 T=1 P=1 */ > + > +/* Macro to print a string to the UART, if there is one. > + * Clobbers r0-r3. */ > +#ifdef EARLY_UART_ADDRESS > +#define PRINT(_s) \ > + adr x0, 98f ; \ > + bl puts ; \ > + b 99f ; \ > +98: .asciz _s ; \ > + .align 2 ; \ > +99: > +#else > +#define PRINT(s) > +#endif > + > + /*.aarch64*/ > + > + /* > + * Kernel startup entry point. > + * --------------------------- > + * > + * The requirements are: > + * MMU = off, D-cache = off, I-cache = on or off, > + * x0 = physical address to the FDT blob. > + * > + * This must be the very first address in the loaded image. > + * It should be linked at XEN_VIRT_START, and loaded at any > + * 2MB-aligned address. All of text+data+bss must fit in 2MB, > + * or the initial pagetable code below will need adjustment. > + */ > + > + .global start > +start: > + /* > + * DO NOT MODIFY. Image header expected by Linux boot-loaders. 
> + */ > + b real_start /* branch to kernel start, magic */ > + .long 0 /* reserved */ > + .quad 0 /* Image load offset from start of RAM */ > + .quad 0 /* reserved */ > + .quad 0 /* reserved */ > + > +real_start: > + msr DAIFSet, 0xf /* Disable all interrupts */ > + > + /* Save the bootloader arguments in less-clobberable registers */ > + mov x21, x0 /* x21 := DTB, physical address */ > + > + /* Find out where we are */ > + ldr x0, =start > + adr x19, start /* x19 := paddr (start) */ > + sub x20, x19, x0 /* x20 := phys-offset */ > + > + /* Using the DTB in the .dtb section? */ > +#ifdef CONFIG_DTB_FILE > + ldr x21, =_sdtb > + add x21, x21, x20 /* x21 := paddr(DTB) */ > +#endif > + > + /* Are we the boot CPU? */ > + mov x22, #0 /* x22 := CPU ID */ > + mrs x0, mpidr_el1 > + tbz x0, 31, boot_cpu /* Multiprocessor extension supported? */ > + tbnz x0, 30, boot_cpu /* Uniprocessor system? */ > + > + mov x13, #(0xff << 24) > + bics x22, x0, x13 /* Mask out flags to get CPU ID */ > + b.eq boot_cpu /* If we''re CPU 0, boot now */ > + > + /* Non-boot CPUs wait here to be woken up one at a time. */ > +1: dsb sy > + ldr x0, =smp_up_cpu /* VA of gate */ > + add x0, x0, x20 /* PA of gate */ > + ldr x1, [x0] /* Which CPU is being booted? */ > + cmp x1, x22 /* Is it us? 
*/ > + b.eq 2f > + wfe > + b 1b > +2: > + > +boot_cpu: > +#ifdef EARLY_UART_ADDRESS > + ldr x23, =EARLY_UART_ADDRESS /* x23 := UART base address */ > + cbnz x22, 1f > + bl init_uart /* CPU 0 sets up the UART too */ > +1: PRINT("- CPU ") > + mov x0, x22 > + bl putn > + PRINT(" booting -\r\n") > +#endif > + > + PRINT("- Current EL ") > + mrs x0, CurrentEL > + bl putn > + PRINT(" -\r\n") > + > + /* Are we in EL3 */ > + mrs x0, CurrentEL > + cmp x0, #PSR_MODE_EL3t > + ccmp x0, #PSR_MODE_EL3h, #0x4, ne > + b.eq 1f /* Yes */ > + > + /* Are we in EL2 */ > + cmp x0, #PSR_MODE_EL2t > + ccmp x0, #PSR_MODE_EL2h, #0x4, ne > + b.eq 2f /* Yes */ > + > + /* Otherwise, it must have been EL0 or EL1 */ > + PRINT("- CPU is not in EL3 or EL2 -\r\n") > + b fail > + > +1: PRINT("- Started in EL3 -\r\n- Entering EL2 -\r\n") > + ldr x1, =enter_el2_mode /* VA of function */ > + add x1, x1, x20 /* PA of function */ > + adr x30, el2 /* Set return address for call */ > + br x1 /* Call function */ > + > +2: PRINT("- Started in EL2 mode -\r\n") > + > +el2: > + /* Zero BSS On the boot CPU to avoid nasty surprises */ > + cbnz x22, skip_bss > + > + PRINT("- Zero BSS -\r\n") > + ldr x0, =__bss_start /* Load start & end of bss */ > + ldr x1, =__bss_end > + add x0, x0, x20 /* Apply physical offset */ > + add x1, x1, x20 > + > +1: str xzr, [x0], #8 > + cmp x0, x1 > + b.lo 1b > + > +skip_bss: > + > + PRINT("- Setting up control registers -\r\n") > + > + /* Set up memory attribute type tables */ > + ldr x0, =MAIRVAL > + msr mair_el2, x0 > + > + /* Set up the HTCR: > + * PASize -- 4G > + * Top byte is used > + * PT walks use Outer-Shareable accesses, > + * PT walks are write-back, no-write-allocate in both cache levels, > + * Full 64-bit address space goes through this table. 
*/ > + ldr x0, =0x80802500 > + msr tcr_el2, x0 > + > + /* Set up the HSCTLR: > + * Exceptions in LE ARM, > + * Low-latency IRQs disabled, > + * Write-implies-XN disabled (for now), > + * D-cache disabled (for now), > + * I-cache enabled, > + * Alignment checking enabled, > + * MMU translation disabled (for now). */ > + ldr x0, =(HSCTLR_BASE|SCTLR_A) > + msr SCTLR_EL2, x0 > + > + /* Write Xen''s PT''s paddr into the HTTBR */ > + ldr x4, =xen_pgtable > + add x4, x4, x20 /* x4 := paddr (xen_pagetable) */ > + msr TTBR0_EL2, x4 > + > + /* Non-boot CPUs don''t need to rebuild the pagetable */ > + cbnz x22, pt_ready > + > + ldr x1, =xen_first > + add x1, x1, x20 /* x1 := paddr (xen_first) */ > + mov x3, #PT_PT /* x2 := table map of xen_first */ > + orr x2, x1, x3 /* (+ rights for linear PT) */ > + str x2, [x4, #0] /* Map it in slot 0 */ > + > + mov x4, x1 /* Next level into xen_first */ > + > + /* console fixmap */ > +#ifdef EARLY_UART_ADDRESS > + ldr x1, =xen_fixmap > + add x1, x1, x20 /* x1 := paddr (xen_fixmap) */ > + lsr x2, x23, #12 > + lsl x2, x2, #12 /* 4K aligned paddr of UART */ > + mov x3, #PT_DEV_L3 > + orr x2, x2, x3 /* x2 := 4K dev map including UART */ > + str x2, [x1, #(FIXMAP_CONSOLE*8)] /* Map it in the first fixmap''s slot */ > +#endif > + > + /* Build the baseline idle pagetable''s first-level entries */ > + ldr x1, =xen_second > + add x1, x1, x20 /* x1 := paddr (xen_second) */ > + mov x3, #PT_PT /* x2 := table map of xen_second */ > + orr x2, x1, x3 /* (+ rights for linear PT) */ > + str x2, [x4, #0] /* Map it in slot 0 */ > + add x2, x2, #0x1000 > + str x2, [x4, #8] /* Map 2nd page in slot 1 */ > + add x2, x2, #0x1000 > + str x2, [x4, #16] /* Map 3rd page in slot 2 */ > + add x2, x2, #0x1000 > + str x2, [x4, #24] /* Map 4th page in slot 3 */ > + > + /* Now set up the second-level entries */ > + mov x3, #PT_MEM > + orr x2, x19, x3 /* x2 := 2MB normal map of Xen */ > + orr x4, xzr, x19, lsr #18 > + str x2, [x1, x4] /* Map Xen there */ > + ldr x4, =start 
> + lsr x4, x4, #18 /* Slot for vaddr(start) */ > + str x2, [x1, x4] /* Map Xen there too */ > + > + /* xen_fixmap pagetable */ > + ldr x2, =xen_fixmap > + add x2, x2, x20 /* x2 := paddr (xen_fixmap) */ > + mov x3, #PT_PT > + orr x2, x2, x3 /* x2 := table map of xen_fixmap */ > + add x4, x4, #8 > + str x2, [x1, x4] /* Map it in the fixmap''s slot */ > + > + lsr x2, x21, #21 > + lsl x2, x2, #21 /* 2MB-aligned paddr of DTB */ > + mov x3, #PT_MEM /* x2 := 2MB RAM incl. DTB */ > + orr x2, x2, x3 > + add x4, x4, #8 > + str x2, [x1, x4] /* Map it in the early boot slot */ > + > +pt_ready: > + PRINT("- Turning on paging -\r\n") > + > + ldr x1, =paging /* Explicit vaddr, not RIP-relative */ > + mrs x0, SCTLR_EL2 > + orr x0, x0, #SCTLR_M /* Enable MMU */ > + orr x0, x0, #SCTLR_C /* Enable D-cache */ > + dsb sy /* Flush PTE writes and finish reads */ > + msr SCTLR_EL2, x0 /* now paging is enabled */ > + isb /* Now, flush the icache */ > + br x1 /* Get a proper vaddr into PC */ > +paging: > + > +#ifdef EARLY_UART_ADDRESS > + /* Use a virtual address to access the UART. 
*/ > + ldr x23, =FIXMAP_ADDR(FIXMAP_CONSOLE) > +#endif > + > + PRINT("- Ready -\r\n") > + > + /* The boot CPU should go straight into C now */ > + cbz x22, launch > + > + /* Non-boot CPUs need to move on to the relocated pagetables */ > + ldr x4, =boot_ttbr /* VA of TTBR0_EL2 stashed by CPU 0 */ > + add x4, x4, x20 /* PA of it */ > + ldr x4, [x4] /* Actual value */ > + dsb sy > + msr TTBR0_EL2, x4 > + dsb sy > + isb > + tlbi alle2 > + dsb sy /* Ensure completion of TLB flush */ > + isb > + > + /* Non-boot CPUs report that they''ve got this far */ > + ldr x0, =ready_cpus > +1: ldaxr x1, [x0] /* { read # of ready CPUs } */ > + add x1, x1, #1 /* Atomically { ++ } */ > + stlxr w2, x1, [x0] /* { writeback } */ > + cbnz w2, 1b > + dsb sy > + dc cvac, x0 /* Flush D-Cache */ > + dsb sy > + > + /* Here, the non-boot CPUs must wait again -- they''re now running on > + * the boot CPU''s pagetables so it''s safe for the boot CPU to > + * overwrite the non-relocated copy of Xen. Once it''s done that, > + * and brought up the memory allocator, non-boot CPUs can get their > + * own stacks and enter C. */ > +1: wfe > + dsb sy > + ldr x0, =smp_up_cpu > + ldr x1, [x0] /* Which CPU is being booted? */ > + cmp x1, x12 /* Is it us? */ > + b.ne 1b > + > +launch: > + ldr x0, =init_stack /* Find the boot-time stack */ > + ldr x0, [x0] > + add x0, x0, #STACK_SIZE /* (which grows down from the top). */ > + sub x0, x0, #CPUINFO_sizeof /* Make room for CPU save record */ > + mov sp, x0 > + > + mov x0, x20 /* Marshal args: - phys_offset */ > + mov x1, x21 /* - FDT */ > + mov x2, x22 /* - CPU ID */ > + cbz x22, start_xen /* and disappear into the land of C */ > + b start_secondary /* (to the appropriate entry point) */ > + > +/* Fail-stop > + * r0: string explaining why */ > +fail: PRINT("- Boot failed -\r\n") > +1: wfe > + b 1b > + > +#ifdef EARLY_UART_ADDRESS > + > +/* Bring up the UART. Specific to the PL011 UART. 
> + * Clobbers r0-r2 */ > +init_uart: > + mov x1, #0x0 > + strh w1, [x23, #0x24] /* -> UARTIBRD (Baud divisor fraction) */ > + mov x1, #0x4 /* 7.3728MHz / 0x4 == 16 * 115200 */ > + strh w1, [x23, #0x24] /* -> UARTIBRD (Baud divisor integer) */ > + mov x1, #0x60 /* 8n1 */ > + strh w1, [x23, #0x24] /* -> UARTLCR_H (Line control) */ > + ldr x1, =0x00000301 /* RXE | TXE | UARTEN */ > + strh w1, [x23, #0x30] /* -> UARTCR (Control Register) */ > + adr x0, 1f > + b puts > +1: .asciz "- UART enabled -\r\n" > + .align 4 > + > +/* Print early debug messages. Specific to the PL011 UART. > + * r0: Nul-terminated string to print. > + * Clobbers r0-r2 */ > +puts: > + ldrh w2, [x23, #0x18] /* <- UARTFR (Flag register) */ > + tst w2, #0x8 /* Check BUSY bit */ > + b.ne puts /* Wait for the UART to be ready */ > + ldrb w2, [x0], #1 /* Load next char */ > + cbz w2, 1f /* Exit on nul */ > + str w2, [x23] /* -> UARTDR (Data Register) */ > + b puts > +1: > + ret > + > +/* Print a 32-bit number in hex. Specific to the PL011 UART. > + * r0: Number to print. 
> + * clobbers r0-r3 */ > +putn: > + adr x1, hex > + mov x3, #8 > +1: ldrh w2, [x23, #0x18] /* <- UARTFR (Flag register) */ > + tst w2, #0x8 /* Check BUSY bit */ > + b.ne 1b /* Wait for the UART to be ready */ > + and x2, x0, #0xf0000000 /* Mask off the top nybble */ > + lsr x2, x2, #28 > + ldrb w2, [x1, x2] /* Convert to a char */ > + strb w2, [x23] /* -> UARTDR (Data Register) */ > + lsl x0, x0, #4 /* Roll it through one nybble at a time */ > + subs x3, x3, #1 > + b.ne 1b > + ret > + > +hex: .ascii "0123456789abcdef" > + .align 2 > + > +#else /* EARLY_UART_ADDRESS */ > + > +init_uart: > +.global early_puts > +early_puts: > +puts: > +putn: mov pc, lr > + > +#endif /* EARLY_UART_ADDRESS */ > diff --git a/xen/arch/arm/arm64/mode_switch.S b/xen/arch/arm/arm64/mode_switch.S > new file mode 100644 > index 0000000..4c38181 > --- /dev/null > +++ b/xen/arch/arm/arm64/mode_switch.S > @@ -0,0 +1,83 @@ > +/* > + * xen/arch/arm/arm64/mode_switch.S > + * > + * Start-of day code to take a CPU from EL3 to EL2. Largely taken from > + * bootwrapper. > + * > + * Ian Campbell <ian.campbell@citrix.com> > + * Copyright (c) 2012 Citrix Systems. > + * > + * This program is free software; you can redistribute it and/or modify > + * it under the terms of the GNU General Public License as published by > + * the Free Software Foundation; either version 2 of the License, or > + * (at your option) any later version. > + * > + * This program is distributed in the hope that it will be useful, > + * but WITHOUT ANY WARRANTY; without even the implied warranty of > + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the > + * GNU General Public License for more details. > + */ > + > +#include <asm/config.h> > +#include <asm/page.h> > +#include <asm/asm_defns.h> > + > +/* Get up a CPU into EL2. Clobbers x0-x3. > + * > + * Expects x22 == CPU number > + * Expects x30 == EL2 entry point > + * > + * This code is specific to the VE model, and not intended to be used > + * on production systems. 
As such it''s a bit hackier than the main > + * boot code in head.S. In future it will be replaced by better > + * integration with the bootloader/firmware so that Xen always starts > + * at EL2. > + */ > + > +.globl enter_el2_mode > +enter_el2_mode: > + mov x0, #0x30 // RES1 > + orr x0, x0, #(1 << 0) // Non-secure EL1 > + orr x0, x0, #(1 << 8) // HVC enable > + orr x0, x0, #(1 << 10) // 64-bit EL2 > + msr scr_el3, x0 > + > + msr cptr_el3, xzr // Disable copro. traps to EL3 > + > + ldr x0, =0x01800000 // 24Mhz > + msr cntfrq_el0, x0 > + > + /* > + * Check for the primary CPU to avoid a race on the distributor > + * registers. > + */ > + cbnz x22, 1f > + > + ldr x1, =(GIC_BASE_ADDRESS+GIC_DR_OFFSET) // GICD_CTLR > + mov w0, #3 // EnableGrp0 | EnableGrp1 > + str w0, [x1] > + > +1: ldr x1, =(GIC_BASE_ADDRESS+GIC_DR_OFFSET+0x80) // GICD_IGROUPR > + mov w0, #~0 // Grp1 interrupts > + str w0, [x1], #4 > + b.ne 2f // Only local interrupts for secondary CPUs > + str w0, [x1], #4 > + str w0, [x1], #4 > + > +2: ldr x1, =(GIC_BASE_ADDRESS+GIC_CR_OFFSET) // GICC_CTLR > + ldr w0, [x1] > + mov w0, #3 // EnableGrp0 | EnableGrp1 > + str w0, [x1] > + > + mov w0, #1 << 7 // allow NS access to GICC_PMR > + str w0, [x1, #4] // GICC_PMR > + > + msr sctlr_el2, xzr > + > + /* > + * Prepare the switch to the EL2_SP1 mode from EL3 > + */ > + msr elr_el3, x30 // Return to desired function > + mov x1, #0x3c9 // EL2_SP1 | D | A | I | F > + msr spsr_el3, x1 > + eret > diff --git a/xen/arch/arm/xen.lds.S b/xen/arch/arm/xen.lds.S > index 410d7db..b1f0a78 100644 > --- a/xen/arch/arm/xen.lds.S > +++ b/xen/arch/arm/xen.lds.S > @@ -11,7 +11,13 @@ > > ENTRY(start) > > -OUTPUT_ARCH(arm) > +#if defined(__arm__) > +#define FORMAT arm > +#elif defined(__aarch64__) > +#define FORMAT aarch64 > +#endif > + > +OUTPUT_ARCH(FORMAT) > > PHDRS > { > diff --git a/xen/include/asm-arm/page.h b/xen/include/asm-arm/page.h > index 9acd0af..e0a636f 100644 > --- a/xen/include/asm-arm/page.h > +++ 
b/xen/include/asm-arm/page.h > @@ -38,6 +38,7 @@ > */ > #define MAIR0VAL 0xeeaa4400 > #define MAIR1VAL 0xff000004 > +#define MAIRVAL (MAIR0VAL|MAIR1VAL<<32) > > /* > * Attribute Indexes. > diff --git a/xen/include/public/arch-arm.h b/xen/include/public/arch-arm.h > index 8dd9062..dc12524 100644 > --- a/xen/include/public/arch-arm.h > +++ b/xen/include/public/arch-arm.h > @@ -174,6 +174,8 @@ typedef uint64_t xen_callback_t; > > /* 0-4: Mode */ > #define PSR_MODE_MASK 0x1f > + > +/* 32 bit modes */ > #define PSR_MODE_USR 0x10 > #define PSR_MODE_FIQ 0x11 > #define PSR_MODE_IRQ 0x12 > @@ -184,6 +186,18 @@ typedef uint64_t xen_callback_t; > #define PSR_MODE_UND 0x1b > #define PSR_MODE_SYS 0x1f > > +/* 64 bit modes */ > +#ifdef CONFIG_ARM_64 > +#define PSR_MODE_BIT 0x10 /* Set iff AArch32 */ > +#define PSR_MODE_EL3h 0x0d > +#define PSR_MODE_EL3t 0x0c > +#define PSR_MODE_EL2h 0x09 > +#define PSR_MODE_EL2t 0x08 > +#define PSR_MODE_EL1h 0x05 > +#define PSR_MODE_EL1t 0x04 > +#define PSR_MODE_EL0t 0x00 > +#endif > + > #define PSR_THUMB (1<<5) /* Thumb Mode enable */ > #define PSR_FIQ_MASK (1<<6) /* Fast Interrupt mask */ > #define PSR_IRQ_MASK (1<<7) /* Interrupt mask */ > diff --git a/xen/include/public/hvm/save.h b/xen/include/public/hvm/save.h > index 5538d8e..cc8b5fd 100644 > --- a/xen/include/public/hvm/save.h > +++ b/xen/include/public/hvm/save.h > @@ -102,7 +102,7 @@ DECLARE_HVM_SAVE_TYPE(END, 0, struct hvm_save_end); > > #if defined(__i386__) || defined(__x86_64__) > #include "../arch-x86/hvm/save.h" > -#elif defined(__arm__) > +#elif defined(__arm__) || defined(__aarch64__) > #include "../arch-arm/hvm/save.h" > #else > #error "unsupported architecture" > diff --git a/xen/include/public/xen.h b/xen/include/public/xen.h > index 846f446..a1927c0 100644 > --- a/xen/include/public/xen.h > +++ b/xen/include/public/xen.h > @@ -31,7 +31,7 @@ > > #if defined(__i386__) || defined(__x86_64__) > #include "arch-x86/xen.h" > -#elif defined(__arm__) > +#elif defined(__arm__) || 
defined (__aarch64__) > #include "arch-arm.h" > #else > #error "Unsupported architecture" > diff --git a/xen/include/xen/libelf.h b/xen/include/xen/libelf.h > index e8f6508..218bb18 100644 > --- a/xen/include/xen/libelf.h > +++ b/xen/include/xen/libelf.h > @@ -23,7 +23,7 @@ > #ifndef __XEN_LIBELF_H__ > #define __XEN_LIBELF_H__ > > -#if defined(__i386__) || defined(__x86_64__) || defined(__arm__) > +#if defined(__i386__) || defined(__x86_64__) || defined(__arm__) || defined(__aarch64__) > #define XEN_ELF_LITTLE_ENDIAN > #else > #error define architectural endianness > -- > 1.7.2.5 > > > > > _______________________________________________ > Xen-devel mailing list > Xen-devel@lists.xen.org > http://lists.xen.org/xen-devel
On Thu, 2013-02-21 at 16:07 +0000, Tim Deegan wrote:> At 16:02 +0000 on 21 Feb (1361462564), Ian Campbell wrote: > > On Thu, 2013-02-21 at 15:36 +0000, Tim Deegan wrote: > > > At 15:25 +0000 on 21 Feb (1361460324), Ian Campbell wrote: > > > > On Thu, 2013-02-21 at 15:10 +0000, Tim Deegan wrote: > > > > > At 16:47 +0000 on 14 Feb (1360860461), Ian Campbell wrote: > > > > > > Signed-off-by: Ian Campbell <ian.campbell@citrix.com> > > > > > > --- > > > > > > v2: Call leave_hypervisor_tail on exit back to guest, disable interrupts while > > > > > > restoring state. > > > > > > > > > > You don''t seem to have addressed my other comments on v1: > > > > > > > > I''ve got them in v3, I noted that I hadn''t addresses you comment on this > > > > patch in the #0/46. > > > > > > So you did; I did read the 0/46, but for some reason all that stuck in > > > my head was the WFE stuff. > > > > > > AFAICS you just need to re-roll this and #25, > > > > I don''t think you mean #25? That is "xen: arm64: add guest type to > > domain field." which you''ve acked. > > Sorry, I meant #5.Ah yes, I''ve just resent that one.> > When I''m applying my own patches I prefer to do it from the list rather > > than short cutting them from my own tree, keep me honest/from making > > mistakes. How about I include an index of acked/unacked patches in the > > zeroeth mail? You ought to be able to just mark it all as read. > > Fair enough -- no need to index them; I just keep all previous versions > of a series around so I can easily find the comments. But this won''t > need a v4, will it? :)I hope not! Ian.
Stefano Stabellini
2013-Feb-21 16:23 UTC
Re: [PATCH V2 23/46] xen: arm: add register_t type, native register size for the hypervisor
On Thu, 21 Feb 2013, Ian Campbell wrote:> On Thu, 2013-02-14 at 16:47 +0000, Ian Campbell wrote: > > Signed-off-by: Ian Campbell <ian.campbell@citrix.com> > > Acked-by: Tim Deegan <tim@xen.org> > > but: > > This is mostly a matter of coding taste, so I''d like Stefano''s > > ack/nack here as well. > > Stefano, any strong opinion?Are there any concrete benefits in introducing register_t compared to using unsigned long?> > xen/arch/arm/domain_build.c | 2 +- > > xen/arch/arm/smpboot.c | 2 +- > > xen/arch/arm/traps.c | 44 ++++++++++++++++++++++-------------------- > > xen/arch/arm/vgic.c | 18 ++++++++-------- > > xen/arch/arm/vpl011.c | 6 ++-- > > xen/arch/arm/vtimer.c | 6 ++-- > > xen/include/asm-arm/regs.h | 2 +- > > xen/include/asm-arm/types.h | 4 +++ > > 8 files changed, 45 insertions(+), 39 deletions(-) > > > > diff --git a/xen/arch/arm/domain_build.c b/xen/arch/arm/domain_build.c > > index 7403f1a..30d014a 100644 > > --- a/xen/arch/arm/domain_build.c > > +++ b/xen/arch/arm/domain_build.c > > @@ -268,7 +268,7 @@ static int prepare_dtb(struct domain *d, struct kernel_info *kinfo) > > > > static void dtb_load(struct kernel_info *kinfo) > > { > > - void * __user dtb_virt = (void *)(u32)kinfo->dtb_paddr; > > + void * __user dtb_virt = (void *)(register_t)kinfo->dtb_paddr; > > > > raw_copy_to_guest(dtb_virt, kinfo->fdt, fdt_totalsize(kinfo->fdt)); > > xfree(kinfo->fdt); > > diff --git a/xen/arch/arm/smpboot.c b/xen/arch/arm/smpboot.c > > index 86379b7..d8eb5d3 100644 > > --- a/xen/arch/arm/smpboot.c > > +++ b/xen/arch/arm/smpboot.c > > @@ -142,7 +142,7 @@ void __cpuinit start_secondary(unsigned long boot_phys_offset, > > set_processor_id(cpuid); > > > > /* Setup Hyp vector base */ > > - WRITE_CP32((uint32_t) hyp_traps_vector, HVBAR); > > + WRITE_CP32((register_t) hyp_traps_vector, HVBAR); > > > > mmu_init_secondary_cpu(); > > enable_vfp(); > > diff --git a/xen/arch/arm/traps.c b/xen/arch/arm/traps.c > > index eaf1f52..0299b33 100644 > > --- a/xen/arch/arm/traps.c > > 
+++ b/xen/arch/arm/traps.c > > @@ -68,7 +68,7 @@ static void print_xen_info(void) > > debug_build() ? ''y'' : ''n'', print_tainted(taint_str)); > > } > > > > -uint32_t *select_user_reg(struct cpu_user_regs *regs, int reg) > > +register_t *select_user_reg(struct cpu_user_regs *regs, int reg) > > { > > BUG_ON( !guest_mode(regs) ); > > > > @@ -81,20 +81,20 @@ uint32_t *select_user_reg(struct cpu_user_regs *regs, int reg) > > > > switch ( reg ) { > > case 0 ... 7: /* Unbanked registers */ > > - BUILD_BUG_ON(REGOFFS(r0) + 7*sizeof(uint32_t) != REGOFFS(r7)); > > + BUILD_BUG_ON(REGOFFS(r0) + 7*sizeof(register_t) != REGOFFS(r7)); > > return ®s->r0 + reg; > > case 8 ... 12: /* Register banked in FIQ mode */ > > - BUILD_BUG_ON(REGOFFS(r8_fiq) + 4*sizeof(uint32_t) != REGOFFS(r12_fiq)); > > + BUILD_BUG_ON(REGOFFS(r8_fiq) + 4*sizeof(register_t) != REGOFFS(r12_fiq)); > > if ( fiq_mode(regs) ) > > return ®s->r8_fiq + reg - 8; > > else > > return ®s->r8 + reg - 8; > > case 13 ... 14: /* Banked SP + LR registers */ > > - BUILD_BUG_ON(REGOFFS(sp_fiq) + 1*sizeof(uint32_t) != REGOFFS(lr_fiq)); > > - BUILD_BUG_ON(REGOFFS(sp_irq) + 1*sizeof(uint32_t) != REGOFFS(lr_irq)); > > - BUILD_BUG_ON(REGOFFS(sp_svc) + 1*sizeof(uint32_t) != REGOFFS(lr_svc)); > > - BUILD_BUG_ON(REGOFFS(sp_abt) + 1*sizeof(uint32_t) != REGOFFS(lr_abt)); > > - BUILD_BUG_ON(REGOFFS(sp_und) + 1*sizeof(uint32_t) != REGOFFS(lr_und)); > > + BUILD_BUG_ON(REGOFFS(sp_fiq) + 1*sizeof(register_t) != REGOFFS(lr_fiq)); > > + BUILD_BUG_ON(REGOFFS(sp_irq) + 1*sizeof(register_t) != REGOFFS(lr_irq)); > > + BUILD_BUG_ON(REGOFFS(sp_svc) + 1*sizeof(register_t) != REGOFFS(lr_svc)); > > + BUILD_BUG_ON(REGOFFS(sp_abt) + 1*sizeof(register_t) != REGOFFS(lr_abt)); > > + BUILD_BUG_ON(REGOFFS(sp_und) + 1*sizeof(register_t) != REGOFFS(lr_und)); > > switch ( regs->cpsr & PSR_MODE_MASK ) > > { > > case PSR_MODE_USR: > > @@ -315,11 +315,11 @@ static void show_guest_stack(struct cpu_user_regs *regs) > > printk("GUEST STACK GOES HERE\n"); > > } > > > 
> -#define STACK_BEFORE_EXCEPTION(regs) ((uint32_t*)(regs)->sp) > > +#define STACK_BEFORE_EXCEPTION(regs) ((register_t*)(regs)->sp) > > > > static void show_trace(struct cpu_user_regs *regs) > > { > > - uint32_t *frame, next, addr, low, high; > > + register_t *frame, next, addr, low, high; > > > > printk("Xen call trace:\n "); > > > > @@ -327,7 +327,7 @@ static void show_trace(struct cpu_user_regs *regs) > > print_symbol(" %s\n ", regs->pc); > > > > /* Bounds for range of valid frame pointer. */ > > - low = (uint32_t)(STACK_BEFORE_EXCEPTION(regs)/* - 2*/); > > + low = (register_t)(STACK_BEFORE_EXCEPTION(regs)/* - 2*/); > > high = (low & ~(STACK_SIZE - 1)) + > > (STACK_SIZE - sizeof(struct cpu_info)); > > > > @@ -356,7 +356,7 @@ static void show_trace(struct cpu_user_regs *regs) > > break; > > { > > /* Ordinary stack frame. */ > > - frame = (uint32_t *)next; > > + frame = (register_t *)next; > > next = frame[-1]; > > addr = frame[0]; > > } > > @@ -364,7 +364,7 @@ static void show_trace(struct cpu_user_regs *regs) > > printk("[<%p>]", _p(addr)); > > print_symbol(" %s\n ", addr); > > > > - low = (uint32_t)&frame[1]; > > + low = (register_t)&frame[1]; > > } > > > > printk("\n"); > > @@ -372,7 +372,7 @@ static void show_trace(struct cpu_user_regs *regs) > > > > void show_stack(struct cpu_user_regs *regs) > > { > > - uint32_t *stack = STACK_BEFORE_EXCEPTION(regs), addr; > > + register_t *stack = STACK_BEFORE_EXCEPTION(regs), addr; > > int i; > > > > if ( guest_mode(regs) ) > > @@ -486,20 +486,22 @@ static arm_hypercall_t arm_hypercall_table[] = { > > > > static void do_debug_trap(struct cpu_user_regs *regs, unsigned int code) > > { > > - uint32_t reg, *r; > > + register_t *r; > > + uint32_t reg; > > uint32_t domid = current->domain->domain_id; > > switch ( code ) { > > case 0xe0 ... 
0xef: > > reg = code - 0xe0; > > r = select_user_reg(regs, reg); > > - printk("DOM%d: R%d = %#010"PRIx32" at %#010"PRIx32"\n", > > + printk("DOM%d: R%d = 0x%"PRIregister" at 0x%"PRIvaddr"\n", > > domid, reg, *r, regs->pc); > > break; > > case 0xfd: > > - printk("DOM%d: Reached %#010"PRIx32"\n", domid, regs->pc); > > + printk("DOM%d: Reached %"PRIvaddr"\n", domid, regs->pc); > > break; > > case 0xfe: > > - printk("%c", (char)(regs->r0 & 0xff)); > > + r = select_user_reg(regs, 0); > > + printk("%c", (char)(*r & 0xff)); > > break; > > case 0xff: > > printk("DOM%d: DEBUG\n", domid); > > @@ -561,7 +563,7 @@ static void do_cp15_32(struct cpu_user_regs *regs, > > union hsr hsr) > > { > > struct hsr_cp32 cp32 = hsr.cp32; > > - uint32_t *r = select_user_reg(regs, cp32.reg); > > + uint32_t *r = (uint32_t*)select_user_reg(regs, cp32.reg); > > > > if ( !cp32.ccvalid ) { > > dprintk(XENLOG_ERR, "cp_15(32): need to handle invalid condition codes\n"); > > @@ -607,7 +609,7 @@ static void do_cp15_32(struct cpu_user_regs *regs, > > BUG_ON(!vtimer_emulate(regs, hsr)); > > break; > > default: > > - printk("%s p15, %d, r%d, cr%d, cr%d, %d @ %#08x\n", > > + printk("%s p15, %d, r%d, cr%d, cr%d, %d @ 0x%"PRIregister"\n", > > cp32.read ? "mrc" : "mcr", > > cp32.op1, cp32.reg, cp32.crn, cp32.crm, cp32.op2, regs->pc); > > panic("unhandled 32-bit CP15 access %#x\n", hsr.bits & HSR_CP32_REGS_MASK); > > @@ -637,7 +639,7 @@ static void do_cp15_64(struct cpu_user_regs *regs, > > BUG_ON(!vtimer_emulate(regs, hsr)); > > break; > > default: > > - printk("%s p15, %d, r%d, r%d, cr%d @ %#08x\n", > > + printk("%s p15, %d, r%d, r%d, cr%d @ 0x%"PRIregister"\n", > > cp64.read ? 
"mrrc" : "mcrr", > > cp64.op1, cp64.reg1, cp64.reg2, cp64.crm, regs->pc); > > panic("unhandled 64-bit CP15 access %#x\n", hsr.bits & HSR_CP64_REGS_MASK); > > diff --git a/xen/arch/arm/vgic.c b/xen/arch/arm/vgic.c > > index 39b9775..57147d5 100644 > > --- a/xen/arch/arm/vgic.c > > +++ b/xen/arch/arm/vgic.c > > @@ -160,7 +160,7 @@ static int vgic_distr_mmio_read(struct vcpu *v, mmio_info_t *info) > > { > > struct hsr_dabt dabt = info->dabt; > > struct cpu_user_regs *regs = guest_cpu_user_regs(); > > - uint32_t *r = select_user_reg(regs, dabt.reg); > > + register_t *r = select_user_reg(regs, dabt.reg); > > struct vgic_irq_rank *rank; > > int offset = (int)(info->gpa - VGIC_DISTR_BASE_ADDRESS); > > int gicd_reg = REG(offset); > > @@ -372,7 +372,7 @@ static int vgic_distr_mmio_write(struct vcpu *v, mmio_info_t *info) > > { > > struct hsr_dabt dabt = info->dabt; > > struct cpu_user_regs *regs = guest_cpu_user_regs(); > > - uint32_t *r = select_user_reg(regs, dabt.reg); > > + register_t *r = select_user_reg(regs, dabt.reg); > > struct vgic_irq_rank *rank; > > int offset = (int)(info->gpa - VGIC_DISTR_BASE_ADDRESS); > > int gicd_reg = REG(offset); > > @@ -421,13 +421,13 @@ static int vgic_distr_mmio_write(struct vcpu *v, mmio_info_t *info) > > > > case GICD_ISPENDR ... GICD_ISPENDRN: > > if ( dabt.size != 0 && dabt.size != 2 ) goto bad_width; > > - printk("vGICD: unhandled %s write %#"PRIx32" to ISPENDR%d\n", > > + printk("vGICD: unhandled %s write %#"PRIregister" to ISPENDR%d\n", > > dabt.size ? "word" : "byte", *r, gicd_reg - GICD_ISPENDR); > > return 0; > > > > case GICD_ICPENDR ... GICD_ICPENDRN: > > if ( dabt.size != 0 && dabt.size != 2 ) goto bad_width; > > - printk("vGICD: unhandled %s write %#"PRIx32" to ICPENDR%d\n", > > + printk("vGICD: unhandled %s write %#"PRIregister" to ICPENDR%d\n", > > dabt.size ? 
"word" : "byte", *r, gicd_reg - GICD_ICPENDR); > > return 0; > > > > @@ -499,19 +499,19 @@ static int vgic_distr_mmio_write(struct vcpu *v, mmio_info_t *info) > > > > case GICD_SGIR: > > if ( dabt.size != 2 ) goto bad_width; > > - printk("vGICD: unhandled write %#"PRIx32" to ICFGR%d\n", > > + printk("vGICD: unhandled write %#"PRIregister" to ICFGR%d\n", > > *r, gicd_reg - GICD_ICFGR); > > return 0; > > > > case GICD_CPENDSGIR ... GICD_CPENDSGIRN: > > if ( dabt.size != 0 && dabt.size != 2 ) goto bad_width; > > - printk("vGICD: unhandled %s write %#"PRIx32" to ICPENDSGIR%d\n", > > + printk("vGICD: unhandled %s write %#"PRIregister" to ICPENDSGIR%d\n", > > dabt.size ? "word" : "byte", *r, gicd_reg - GICD_CPENDSGIR); > > return 0; > > > > case GICD_SPENDSGIR ... GICD_SPENDSGIRN: > > if ( dabt.size != 0 && dabt.size != 2 ) goto bad_width; > > - printk("vGICD: unhandled %s write %#"PRIx32" to ISPENDSGIR%d\n", > > + printk("vGICD: unhandled %s write %#"PRIregister" to ISPENDSGIR%d\n", > > dabt.size ? 
"word" : "byte", *r, gicd_reg - GICD_SPENDSGIR); > > return 0; > > > > @@ -537,13 +537,13 @@ static int vgic_distr_mmio_write(struct vcpu *v, mmio_info_t *info) > > goto write_ignore; > > > > default: > > - printk("vGICD: unhandled write r%d=%"PRIx32" offset %#08x\n", > > + printk("vGICD: unhandled write r%d=%"PRIregister" offset %#08x\n", > > dabt.reg, *r, offset); > > return 0; > > } > > > > bad_width: > > - printk("vGICD: bad write width %d r%d=%"PRIx32" offset %#08x\n", > > + printk("vGICD: bad write width %d r%d=%"PRIregister" offset %#08x\n", > > dabt.size, dabt.reg, *r, offset); > > domain_crash_synchronous(); > > return 0; > > diff --git a/xen/arch/arm/vpl011.c b/xen/arch/arm/vpl011.c > > index 7dcee90..db5094e 100644 > > --- a/xen/arch/arm/vpl011.c > > +++ b/xen/arch/arm/vpl011.c > > @@ -92,7 +92,7 @@ static int uart0_mmio_read(struct vcpu *v, mmio_info_t *info) > > { > > struct hsr_dabt dabt = info->dabt; > > struct cpu_user_regs *regs = guest_cpu_user_regs(); > > - uint32_t *r = select_user_reg(regs, dabt.reg); > > + register_t *r = select_user_reg(regs, dabt.reg); > > int offset = (int)(info->gpa - UART0_START); > > > > switch ( offset ) > > @@ -114,7 +114,7 @@ static int uart0_mmio_write(struct vcpu *v, mmio_info_t *info) > > { > > struct hsr_dabt dabt = info->dabt; > > struct cpu_user_regs *regs = guest_cpu_user_regs(); > > - uint32_t *r = select_user_reg(regs, dabt.reg); > > + register_t *r = select_user_reg(regs, dabt.reg); > > int offset = (int)(info->gpa - UART0_START); > > > > switch ( offset ) > > @@ -127,7 +127,7 @@ static int uart0_mmio_write(struct vcpu *v, mmio_info_t *info) > > /* Silently ignore */ > > return 1; > > default: > > - printk("VPL011: unhandled write r%d=%"PRIx32" offset %#08x\n", > > + printk("VPL011: unhandled write r%d=%"PRIregister" offset %#08x\n", > > dabt.reg, *r, offset); > > domain_crash_synchronous(); > > } > > diff --git a/xen/arch/arm/vtimer.c b/xen/arch/arm/vtimer.c > > index 85201b5..291b87e 100644 > > --- 
a/xen/arch/arm/vtimer.c > > +++ b/xen/arch/arm/vtimer.c > > @@ -99,7 +99,7 @@ static int vtimer_emulate_32(struct cpu_user_regs *regs, union hsr hsr) > > { > > struct vcpu *v = current; > > struct hsr_cp32 cp32 = hsr.cp32; > > - uint32_t *r = select_user_reg(regs, cp32.reg); > > + uint32_t *r = (uint32_t *)select_user_reg(regs, cp32.reg); > > s_time_t now; > > > > switch ( hsr.bits & HSR_CP32_REGS_MASK ) > > @@ -151,8 +151,8 @@ static int vtimer_emulate_64(struct cpu_user_regs *regs, union hsr hsr) > > { > > struct vcpu *v = current; > > struct hsr_cp64 cp64 = hsr.cp64; > > - uint32_t *r1 = select_user_reg(regs, cp64.reg1); > > - uint32_t *r2 = select_user_reg(regs, cp64.reg2); > > + uint32_t *r1 = (uint32_t *)select_user_reg(regs, cp64.reg1); > > + uint32_t *r2 = (uint32_t *)select_user_reg(regs, cp64.reg2); > > uint64_t ticks; > > s_time_t now; > > > > diff --git a/xen/include/asm-arm/regs.h b/xen/include/asm-arm/regs.h > > index 7486944..a723f92 100644 > > --- a/xen/include/asm-arm/regs.h > > +++ b/xen/include/asm-arm/regs.h > > @@ -34,7 +34,7 @@ > > * Returns a pointer to the given register value in regs, taking the > > * processor mode (CPSR) into account. 
> > */ > > -extern uint32_t *select_user_reg(struct cpu_user_regs *regs, int reg); > > +extern register_t *select_user_reg(struct cpu_user_regs *regs, int reg); > > > > #endif /* __ARM_REGS_H__ */ > > /* > > diff --git a/xen/include/asm-arm/types.h b/xen/include/asm-arm/types.h > > index d3e16d8..9ca32f1 100644 > > --- a/xen/include/asm-arm/types.h > > +++ b/xen/include/asm-arm/types.h > > @@ -41,6 +41,8 @@ typedef u32 vaddr_t; > > typedef u64 paddr_t; > > #define INVALID_PADDR (~0ULL) > > #define PRIpaddr "016llx" > > +typedef u32 register_t; > > +#define PRIregister "x" > > #elif defined (CONFIG_ARM_64) > > typedef signed long s64; > > typedef unsigned long u64; > > @@ -49,6 +51,8 @@ typedef u64 vaddr_t; > > typedef u64 paddr_t; > > #define INVALID_PADDR (~0UL) > > #define PRIpaddr "016lx" > > +typedef u64 register_t; > > +#define PRIregister "lx" > > #endif > > > > typedef unsigned long size_t; > > -- > > 1.7.2.5 > > > > >
Stefano Stabellini
2013-Feb-21 16:33 UTC
Re: [PATCH V2 20/46] xen: arm64: add to foreign struct checks
On Thu, 14 Feb 2013, Ian Campbell wrote:> Signed-off-by: Ian Campbell <ian.campbell@citrix.com>Acked-by: Stefano Stabellini <stefano.stabellini@eu.citrix.com>> .gitignore | 1 + > tools/include/xen-foreign/Makefile | 5 ++++- > tools/include/xen-foreign/mkheader.py | 19 +++++++++++++++++++ > tools/include/xen-foreign/reference.size | 20 ++++++++++---------- > tools/include/xen-foreign/structs.py | 1 + > 5 files changed, 35 insertions(+), 11 deletions(-) > > diff --git a/.gitignore b/.gitignore > index 73c5b77..2242344 100644 > --- a/.gitignore > +++ b/.gitignore > @@ -364,6 +364,7 @@ tools/include/xen-foreign/structs.pyc > tools/include/xen-foreign/x86_32.h > tools/include/xen-foreign/x86_64.h > tools/include/xen-foreign/arm32.h > +tools/include/xen-foreign/arm64.h > > .git > tools/misc/xen-hptool > diff --git a/tools/include/xen-foreign/Makefile b/tools/include/xen-foreign/Makefile > index 53cc6b4..06b844c 100644 > --- a/tools/include/xen-foreign/Makefile > +++ b/tools/include/xen-foreign/Makefile > @@ -3,7 +3,7 @@ include $(XEN_ROOT)/tools/Rules.mk > > ROOT = $(XEN_ROOT)/xen/include/public > > -architectures := arm32 x86_32 x86_64 > +architectures := arm32 arm64 x86_32 x86_64 > headers := $(patsubst %, %.h, $(architectures)) > > .PHONY: all clean check-headers > @@ -25,6 +25,9 @@ check-headers: checker > arm32.h: mkheader.py structs.py $(ROOT)/arch-arm.h $(ROOT)/xen.h > $(PYTHON) $< $* $@ $(filter %.h,$^) > > +arm64.h: mkheader.py structs.py $(ROOT)/arch-arm.h $(ROOT)/xen.h > + $(PYTHON) $< $* $@ $(filter %.h,$^) > + > x86_32.h: mkheader.py structs.py $(ROOT)/arch-x86/xen-x86_32.h $(ROOT)/arch-x86/xen.h $(ROOT)/xen.h > $(PYTHON) $< $* $@ $(filter %.h,$^) > > diff --git a/tools/include/xen-foreign/mkheader.py b/tools/include/xen-foreign/mkheader.py > index b7c34b1..4858687 100644 > --- a/tools/include/xen-foreign/mkheader.py > +++ b/tools/include/xen-foreign/mkheader.py > @@ -26,6 +26,22 @@ inttypes["arm32"] = { > header["arm32"] = """ > #define __arm___ARM32 1 > 
"""; > +footer["arm32"] = """ > +#undef __DECL_REG > +""" > + > +inttypes["arm64"] = { > + "unsigned long" : "__danger_unsigned_long_on_arm64", > + "long" : "__danger_long_on_arm64", > + "xen_pfn_t" : "uint64_t", > + "xen_ulong_t" : "uint64_t", > +}; > +header["arm64"] = """ > +#define __aarch64___ARM64 1 > +"""; > +footer["arm64"] = """ > +#undef __DECL_REG > +""" > > # x86_32 > inttypes["x86_32"] = { > @@ -59,6 +75,9 @@ header["x86_64"] = """ > #endif > #define __x86_64___X86_64 1 > """; > +footer["x86_64"] = """ > +#undef __DECL_REG > +""" > > ########################################################################### > # main > diff --git a/tools/include/xen-foreign/reference.size b/tools/include/xen-foreign/reference.size > index 0e5529d..7659c64 100644 > --- a/tools/include/xen-foreign/reference.size > +++ b/tools/include/xen-foreign/reference.size > @@ -1,13 +1,13 @@ > > -structs | arm32 x86_32 x86_64 > +structs | arm32 arm64 x86_32 x86_64 > > -start_info | - 1112 1168 > -trap_info | - 8 16 > -cpu_user_regs | 160 68 200 > -vcpu_guest_context | 180 2800 5168 > -arch_vcpu_info | 0 24 16 > -vcpu_time_info | 32 32 32 > -vcpu_info | 48 64 64 > -arch_shared_info | 0 268 280 > -shared_info | 1088 2584 3368 > +start_info | - - 1112 1168 > +trap_info | - - 8 16 > +cpu_user_regs | 160 160 68 200 > +vcpu_guest_context | 180 180 2800 5168 > +arch_vcpu_info | 0 0 24 16 > +vcpu_time_info | 32 32 32 32 > +vcpu_info | 48 48 64 64 > +arch_shared_info | 0 0 268 280 > +shared_info | 1088 1088 2584 3368 > > diff --git a/tools/include/xen-foreign/structs.py b/tools/include/xen-foreign/structs.py > index 51a77c0..5aec2c5 100644 > --- a/tools/include/xen-foreign/structs.py > +++ b/tools/include/xen-foreign/structs.py > @@ -14,6 +14,7 @@ structs = [ "start_info", > "shared_info" ]; > > defines = [ "__arm__", > + "__aarch64__", > "__i386__", > "__x86_64__", > > -- > 1.7.2.5 >
Ian Campbell
2013-Feb-21 16:46 UTC
Re: [PATCH V2 23/46] xen: arm: add register_t type, native register size for the hypervisor
On Thu, 2013-02-21 at 16:23 +0000, Stefano Stabellini wrote:> On Thu, 21 Feb 2013, Ian Campbell wrote: > > On Thu, 2013-02-14 at 16:47 +0000, Ian Campbell wrote: > > > Signed-off-by: Ian Campbell <ian.campbell@citrix.com> > > > Acked-by: Tim Deegan <tim@xen.org> > > > but: > > > This is mostly a matter of coding taste, so I''d like Stefano''s > > > ack/nack here as well. > > > > Stefano, any strong opinion? > > Are there any concrete benefits in introducing register_t compared to > using unsigned long?It decouples us from assuming a compiler where unsigned long is the size of a register ;-) In the ARM port we have mostly been trying to define and use fixed size and/or semantic types (uintXX_t, paddr_t etc) rather than compiler variant things like int and long. Ian.> > > > > xen/arch/arm/domain_build.c | 2 +- > > > xen/arch/arm/smpboot.c | 2 +- > > > xen/arch/arm/traps.c | 44 ++++++++++++++++++++++-------------------- > > > xen/arch/arm/vgic.c | 18 ++++++++-------- > > > xen/arch/arm/vpl011.c | 6 ++-- > > > xen/arch/arm/vtimer.c | 6 ++-- > > > xen/include/asm-arm/regs.h | 2 +- > > > xen/include/asm-arm/types.h | 4 +++ > > > 8 files changed, 45 insertions(+), 39 deletions(-) > > > > > > diff --git a/xen/arch/arm/domain_build.c b/xen/arch/arm/domain_build.c > > > index 7403f1a..30d014a 100644 > > > --- a/xen/arch/arm/domain_build.c > > > +++ b/xen/arch/arm/domain_build.c > > > @@ -268,7 +268,7 @@ static int prepare_dtb(struct domain *d, struct kernel_info *kinfo) > > > > > > static void dtb_load(struct kernel_info *kinfo) > > > { > > > - void * __user dtb_virt = (void *)(u32)kinfo->dtb_paddr; > > > + void * __user dtb_virt = (void *)(register_t)kinfo->dtb_paddr; > > > > > > raw_copy_to_guest(dtb_virt, kinfo->fdt, fdt_totalsize(kinfo->fdt)); > > > xfree(kinfo->fdt); > > > diff --git a/xen/arch/arm/smpboot.c b/xen/arch/arm/smpboot.c > > > index 86379b7..d8eb5d3 100644 > > > --- a/xen/arch/arm/smpboot.c > > > +++ b/xen/arch/arm/smpboot.c > > > @@ -142,7 +142,7 @@ void 
__cpuinit start_secondary(unsigned long boot_phys_offset, > > > set_processor_id(cpuid); > > > > > > /* Setup Hyp vector base */ > > > - WRITE_CP32((uint32_t) hyp_traps_vector, HVBAR); > > > + WRITE_CP32((register_t) hyp_traps_vector, HVBAR); > > > > > > mmu_init_secondary_cpu(); > > > enable_vfp(); > > > diff --git a/xen/arch/arm/traps.c b/xen/arch/arm/traps.c > > > index eaf1f52..0299b33 100644 > > > --- a/xen/arch/arm/traps.c > > > +++ b/xen/arch/arm/traps.c > > > @@ -68,7 +68,7 @@ static void print_xen_info(void) > > > debug_build() ? ''y'' : ''n'', print_tainted(taint_str)); > > > } > > > > > > -uint32_t *select_user_reg(struct cpu_user_regs *regs, int reg) > > > +register_t *select_user_reg(struct cpu_user_regs *regs, int reg) > > > { > > > BUG_ON( !guest_mode(regs) ); > > > > > > @@ -81,20 +81,20 @@ uint32_t *select_user_reg(struct cpu_user_regs *regs, int reg) > > > > > > switch ( reg ) { > > > case 0 ... 7: /* Unbanked registers */ > > > - BUILD_BUG_ON(REGOFFS(r0) + 7*sizeof(uint32_t) != REGOFFS(r7)); > > > + BUILD_BUG_ON(REGOFFS(r0) + 7*sizeof(register_t) != REGOFFS(r7)); > > > return ®s->r0 + reg; > > > case 8 ... 12: /* Register banked in FIQ mode */ > > > - BUILD_BUG_ON(REGOFFS(r8_fiq) + 4*sizeof(uint32_t) != REGOFFS(r12_fiq)); > > > + BUILD_BUG_ON(REGOFFS(r8_fiq) + 4*sizeof(register_t) != REGOFFS(r12_fiq)); > > > if ( fiq_mode(regs) ) > > > return ®s->r8_fiq + reg - 8; > > > else > > > return ®s->r8 + reg - 8; > > > case 13 ... 
14: /* Banked SP + LR registers */ > > > - BUILD_BUG_ON(REGOFFS(sp_fiq) + 1*sizeof(uint32_t) != REGOFFS(lr_fiq)); > > > - BUILD_BUG_ON(REGOFFS(sp_irq) + 1*sizeof(uint32_t) != REGOFFS(lr_irq)); > > > - BUILD_BUG_ON(REGOFFS(sp_svc) + 1*sizeof(uint32_t) != REGOFFS(lr_svc)); > > > - BUILD_BUG_ON(REGOFFS(sp_abt) + 1*sizeof(uint32_t) != REGOFFS(lr_abt)); > > > - BUILD_BUG_ON(REGOFFS(sp_und) + 1*sizeof(uint32_t) != REGOFFS(lr_und)); > > > + BUILD_BUG_ON(REGOFFS(sp_fiq) + 1*sizeof(register_t) != REGOFFS(lr_fiq)); > > > + BUILD_BUG_ON(REGOFFS(sp_irq) + 1*sizeof(register_t) != REGOFFS(lr_irq)); > > > + BUILD_BUG_ON(REGOFFS(sp_svc) + 1*sizeof(register_t) != REGOFFS(lr_svc)); > > > + BUILD_BUG_ON(REGOFFS(sp_abt) + 1*sizeof(register_t) != REGOFFS(lr_abt)); > > > + BUILD_BUG_ON(REGOFFS(sp_und) + 1*sizeof(register_t) != REGOFFS(lr_und)); > > > switch ( regs->cpsr & PSR_MODE_MASK ) > > > { > > > case PSR_MODE_USR: > > > @@ -315,11 +315,11 @@ static void show_guest_stack(struct cpu_user_regs *regs) > > > printk("GUEST STACK GOES HERE\n"); > > > } > > > > > > -#define STACK_BEFORE_EXCEPTION(regs) ((uint32_t*)(regs)->sp) > > > +#define STACK_BEFORE_EXCEPTION(regs) ((register_t*)(regs)->sp) > > > > > > static void show_trace(struct cpu_user_regs *regs) > > > { > > > - uint32_t *frame, next, addr, low, high; > > > + register_t *frame, next, addr, low, high; > > > > > > printk("Xen call trace:\n "); > > > > > > @@ -327,7 +327,7 @@ static void show_trace(struct cpu_user_regs *regs) > > > print_symbol(" %s\n ", regs->pc); > > > > > > /* Bounds for range of valid frame pointer. */ > > > - low = (uint32_t)(STACK_BEFORE_EXCEPTION(regs)/* - 2*/); > > > + low = (register_t)(STACK_BEFORE_EXCEPTION(regs)/* - 2*/); > > > high = (low & ~(STACK_SIZE - 1)) + > > > (STACK_SIZE - sizeof(struct cpu_info)); > > > > > > @@ -356,7 +356,7 @@ static void show_trace(struct cpu_user_regs *regs) > > > break; > > > { > > > /* Ordinary stack frame. 
*/ > > > - frame = (uint32_t *)next; > > > + frame = (register_t *)next; > > > next = frame[-1]; > > > addr = frame[0]; > > > } > > > @@ -364,7 +364,7 @@ static void show_trace(struct cpu_user_regs *regs) > > > printk("[<%p>]", _p(addr)); > > > print_symbol(" %s\n ", addr); > > > > > > - low = (uint32_t)&frame[1]; > > > + low = (register_t)&frame[1]; > > > } > > > > > > printk("\n"); > > > @@ -372,7 +372,7 @@ static void show_trace(struct cpu_user_regs *regs) > > > > > > void show_stack(struct cpu_user_regs *regs) > > > { > > > - uint32_t *stack = STACK_BEFORE_EXCEPTION(regs), addr; > > > + register_t *stack = STACK_BEFORE_EXCEPTION(regs), addr; > > > int i; > > > > > > if ( guest_mode(regs) ) > > > @@ -486,20 +486,22 @@ static arm_hypercall_t arm_hypercall_table[] = { > > > > > > static void do_debug_trap(struct cpu_user_regs *regs, unsigned int code) > > > { > > > - uint32_t reg, *r; > > > + register_t *r; > > > + uint32_t reg; > > > uint32_t domid = current->domain->domain_id; > > > switch ( code ) { > > > case 0xe0 ... 
0xef: > > > reg = code - 0xe0; > > > r = select_user_reg(regs, reg); > > > - printk("DOM%d: R%d = %#010"PRIx32" at %#010"PRIx32"\n", > > > + printk("DOM%d: R%d = 0x%"PRIregister" at 0x%"PRIvaddr"\n", > > > domid, reg, *r, regs->pc); > > > break; > > > case 0xfd: > > > - printk("DOM%d: Reached %#010"PRIx32"\n", domid, regs->pc); > > > + printk("DOM%d: Reached %"PRIvaddr"\n", domid, regs->pc); > > > break; > > > case 0xfe: > > > - printk("%c", (char)(regs->r0 & 0xff)); > > > + r = select_user_reg(regs, 0); > > > + printk("%c", (char)(*r & 0xff)); > > > break; > > > case 0xff: > > > printk("DOM%d: DEBUG\n", domid); > > > @@ -561,7 +563,7 @@ static void do_cp15_32(struct cpu_user_regs *regs, > > > union hsr hsr) > > > { > > > struct hsr_cp32 cp32 = hsr.cp32; > > > - uint32_t *r = select_user_reg(regs, cp32.reg); > > > + uint32_t *r = (uint32_t*)select_user_reg(regs, cp32.reg); > > > > > > if ( !cp32.ccvalid ) { > > > dprintk(XENLOG_ERR, "cp_15(32): need to handle invalid condition codes\n"); > > > @@ -607,7 +609,7 @@ static void do_cp15_32(struct cpu_user_regs *regs, > > > BUG_ON(!vtimer_emulate(regs, hsr)); > > > break; > > > default: > > > - printk("%s p15, %d, r%d, cr%d, cr%d, %d @ %#08x\n", > > > + printk("%s p15, %d, r%d, cr%d, cr%d, %d @ 0x%"PRIregister"\n", > > > cp32.read ? "mrc" : "mcr", > > > cp32.op1, cp32.reg, cp32.crn, cp32.crm, cp32.op2, regs->pc); > > > panic("unhandled 32-bit CP15 access %#x\n", hsr.bits & HSR_CP32_REGS_MASK); > > > @@ -637,7 +639,7 @@ static void do_cp15_64(struct cpu_user_regs *regs, > > > BUG_ON(!vtimer_emulate(regs, hsr)); > > > break; > > > default: > > > - printk("%s p15, %d, r%d, r%d, cr%d @ %#08x\n", > > > + printk("%s p15, %d, r%d, r%d, cr%d @ 0x%"PRIregister"\n", > > > cp64.read ? 
"mrrc" : "mcrr", > > > cp64.op1, cp64.reg1, cp64.reg2, cp64.crm, regs->pc); > > > panic("unhandled 64-bit CP15 access %#x\n", hsr.bits & HSR_CP64_REGS_MASK); > > > diff --git a/xen/arch/arm/vgic.c b/xen/arch/arm/vgic.c > > > index 39b9775..57147d5 100644 > > > --- a/xen/arch/arm/vgic.c > > > +++ b/xen/arch/arm/vgic.c > > > @@ -160,7 +160,7 @@ static int vgic_distr_mmio_read(struct vcpu *v, mmio_info_t *info) > > > { > > > struct hsr_dabt dabt = info->dabt; > > > struct cpu_user_regs *regs = guest_cpu_user_regs(); > > > - uint32_t *r = select_user_reg(regs, dabt.reg); > > > + register_t *r = select_user_reg(regs, dabt.reg); > > > struct vgic_irq_rank *rank; > > > int offset = (int)(info->gpa - VGIC_DISTR_BASE_ADDRESS); > > > int gicd_reg = REG(offset); > > > @@ -372,7 +372,7 @@ static int vgic_distr_mmio_write(struct vcpu *v, mmio_info_t *info) > > > { > > > struct hsr_dabt dabt = info->dabt; > > > struct cpu_user_regs *regs = guest_cpu_user_regs(); > > > - uint32_t *r = select_user_reg(regs, dabt.reg); > > > + register_t *r = select_user_reg(regs, dabt.reg); > > > struct vgic_irq_rank *rank; > > > int offset = (int)(info->gpa - VGIC_DISTR_BASE_ADDRESS); > > > int gicd_reg = REG(offset); > > > @@ -421,13 +421,13 @@ static int vgic_distr_mmio_write(struct vcpu *v, mmio_info_t *info) > > > > > > case GICD_ISPENDR ... GICD_ISPENDRN: > > > if ( dabt.size != 0 && dabt.size != 2 ) goto bad_width; > > > - printk("vGICD: unhandled %s write %#"PRIx32" to ISPENDR%d\n", > > > + printk("vGICD: unhandled %s write %#"PRIregister" to ISPENDR%d\n", > > > dabt.size ? "word" : "byte", *r, gicd_reg - GICD_ISPENDR); > > > return 0; > > > > > > case GICD_ICPENDR ... GICD_ICPENDRN: > > > if ( dabt.size != 0 && dabt.size != 2 ) goto bad_width; > > > - printk("vGICD: unhandled %s write %#"PRIx32" to ICPENDR%d\n", > > > + printk("vGICD: unhandled %s write %#"PRIregister" to ICPENDR%d\n", > > > dabt.size ? 
"word" : "byte", *r, gicd_reg - GICD_ICPENDR); > > > return 0; > > > > > > @@ -499,19 +499,19 @@ static int vgic_distr_mmio_write(struct vcpu *v, mmio_info_t *info) > > > > > > case GICD_SGIR: > > > if ( dabt.size != 2 ) goto bad_width; > > > - printk("vGICD: unhandled write %#"PRIx32" to ICFGR%d\n", > > > + printk("vGICD: unhandled write %#"PRIregister" to ICFGR%d\n", > > > *r, gicd_reg - GICD_ICFGR); > > > return 0; > > > > > > case GICD_CPENDSGIR ... GICD_CPENDSGIRN: > > > if ( dabt.size != 0 && dabt.size != 2 ) goto bad_width; > > > - printk("vGICD: unhandled %s write %#"PRIx32" to ICPENDSGIR%d\n", > > > + printk("vGICD: unhandled %s write %#"PRIregister" to ICPENDSGIR%d\n", > > > dabt.size ? "word" : "byte", *r, gicd_reg - GICD_CPENDSGIR); > > > return 0; > > > > > > case GICD_SPENDSGIR ... GICD_SPENDSGIRN: > > > if ( dabt.size != 0 && dabt.size != 2 ) goto bad_width; > > > - printk("vGICD: unhandled %s write %#"PRIx32" to ISPENDSGIR%d\n", > > > + printk("vGICD: unhandled %s write %#"PRIregister" to ISPENDSGIR%d\n", > > > dabt.size ? 
"word" : "byte", *r, gicd_reg - GICD_SPENDSGIR); > > > return 0; > > > > > > @@ -537,13 +537,13 @@ static int vgic_distr_mmio_write(struct vcpu *v, mmio_info_t *info) > > > goto write_ignore; > > > > > > default: > > > - printk("vGICD: unhandled write r%d=%"PRIx32" offset %#08x\n", > > > + printk("vGICD: unhandled write r%d=%"PRIregister" offset %#08x\n", > > > dabt.reg, *r, offset); > > > return 0; > > > } > > > > > > bad_width: > > > - printk("vGICD: bad write width %d r%d=%"PRIx32" offset %#08x\n", > > > + printk("vGICD: bad write width %d r%d=%"PRIregister" offset %#08x\n", > > > dabt.size, dabt.reg, *r, offset); > > > domain_crash_synchronous(); > > > return 0; > > > diff --git a/xen/arch/arm/vpl011.c b/xen/arch/arm/vpl011.c > > > index 7dcee90..db5094e 100644 > > > --- a/xen/arch/arm/vpl011.c > > > +++ b/xen/arch/arm/vpl011.c > > > @@ -92,7 +92,7 @@ static int uart0_mmio_read(struct vcpu *v, mmio_info_t *info) > > > { > > > struct hsr_dabt dabt = info->dabt; > > > struct cpu_user_regs *regs = guest_cpu_user_regs(); > > > - uint32_t *r = select_user_reg(regs, dabt.reg); > > > + register_t *r = select_user_reg(regs, dabt.reg); > > > int offset = (int)(info->gpa - UART0_START); > > > > > > switch ( offset ) > > > @@ -114,7 +114,7 @@ static int uart0_mmio_write(struct vcpu *v, mmio_info_t *info) > > > { > > > struct hsr_dabt dabt = info->dabt; > > > struct cpu_user_regs *regs = guest_cpu_user_regs(); > > > - uint32_t *r = select_user_reg(regs, dabt.reg); > > > + register_t *r = select_user_reg(regs, dabt.reg); > > > int offset = (int)(info->gpa - UART0_START); > > > > > > switch ( offset ) > > > @@ -127,7 +127,7 @@ static int uart0_mmio_write(struct vcpu *v, mmio_info_t *info) > > > /* Silently ignore */ > > > return 1; > > > default: > > > - printk("VPL011: unhandled write r%d=%"PRIx32" offset %#08x\n", > > > + printk("VPL011: unhandled write r%d=%"PRIregister" offset %#08x\n", > > > dabt.reg, *r, offset); > > > domain_crash_synchronous(); > > > } > > > diff 
--git a/xen/arch/arm/vtimer.c b/xen/arch/arm/vtimer.c > > > index 85201b5..291b87e 100644 > > > --- a/xen/arch/arm/vtimer.c > > > +++ b/xen/arch/arm/vtimer.c > > > @@ -99,7 +99,7 @@ static int vtimer_emulate_32(struct cpu_user_regs *regs, union hsr hsr) > > > { > > > struct vcpu *v = current; > > > struct hsr_cp32 cp32 = hsr.cp32; > > > - uint32_t *r = select_user_reg(regs, cp32.reg); > > > + uint32_t *r = (uint32_t *)select_user_reg(regs, cp32.reg); > > > s_time_t now; > > > > > > switch ( hsr.bits & HSR_CP32_REGS_MASK ) > > > @@ -151,8 +151,8 @@ static int vtimer_emulate_64(struct cpu_user_regs *regs, union hsr hsr) > > > { > > > struct vcpu *v = current; > > > struct hsr_cp64 cp64 = hsr.cp64; > > > - uint32_t *r1 = select_user_reg(regs, cp64.reg1); > > > - uint32_t *r2 = select_user_reg(regs, cp64.reg2); > > > + uint32_t *r1 = (uint32_t *)select_user_reg(regs, cp64.reg1); > > > + uint32_t *r2 = (uint32_t *)select_user_reg(regs, cp64.reg2); > > > uint64_t ticks; > > > s_time_t now; > > > > > > diff --git a/xen/include/asm-arm/regs.h b/xen/include/asm-arm/regs.h > > > index 7486944..a723f92 100644 > > > --- a/xen/include/asm-arm/regs.h > > > +++ b/xen/include/asm-arm/regs.h > > > @@ -34,7 +34,7 @@ > > > * Returns a pointer to the given register value in regs, taking the > > > * processor mode (CPSR) into account. 
> > > */ > > > -extern uint32_t *select_user_reg(struct cpu_user_regs *regs, int reg); > > > +extern register_t *select_user_reg(struct cpu_user_regs *regs, int reg); > > > > > > #endif /* __ARM_REGS_H__ */ > > > /* > > > diff --git a/xen/include/asm-arm/types.h b/xen/include/asm-arm/types.h > > > index d3e16d8..9ca32f1 100644 > > > --- a/xen/include/asm-arm/types.h > > > +++ b/xen/include/asm-arm/types.h > > > @@ -41,6 +41,8 @@ typedef u32 vaddr_t; > > > typedef u64 paddr_t; > > > #define INVALID_PADDR (~0ULL) > > > #define PRIpaddr "016llx" > > > +typedef u32 register_t; > > > +#define PRIregister "x" > > > #elif defined (CONFIG_ARM_64) > > > typedef signed long s64; > > > typedef unsigned long u64; > > > @@ -49,6 +51,8 @@ typedef u64 vaddr_t; > > > typedef u64 paddr_t; > > > #define INVALID_PADDR (~0UL) > > > #define PRIpaddr "016lx" > > > +typedef u64 register_t; > > > +#define PRIregister "lx" > > > #endif > > > > > > typedef unsigned long size_t; > > > -- > > > 1.7.2.5 > > > > > > > > >
Stefano Stabellini
2013-Feb-21 16:49 UTC
Re: [PATCH V2 23/46] xen: arm: add register_t type, native register size for the hypervisor
On Thu, 21 Feb 2013, Ian Campbell wrote:> On Thu, 2013-02-21 at 16:23 +0000, Stefano Stabellini wrote: > > On Thu, 21 Feb 2013, Ian Campbell wrote: > > > On Thu, 2013-02-14 at 16:47 +0000, Ian Campbell wrote: > > > > Signed-off-by: Ian Campbell <ian.campbell@citrix.com> > > > > Acked-by: Tim Deegan <tim@xen.org> > > > > but: > > > > This is mostly a matter of coding taste, so I''d like Stefano''s > > > > ack/nack here as well. > > > > > > Stefano, any strong opinion? > > > > Are there any concrete benefits in introducing register_t compared to > > using unsigned long? > > It decouples us from assuming a compiler where unsigned long is the size > of a register ;-) > > In the ARM port we have mostly been trying to define and use fixed size > and/or semantic types (uintXX_t, paddr_t etc) rather than compiler > variant things like int and long.OK. Acked-by: Stefano Stabellini <stefano.stabellini@eu.citrix.com>> > > > > > > > xen/arch/arm/domain_build.c | 2 +- > > > > xen/arch/arm/smpboot.c | 2 +- > > > > xen/arch/arm/traps.c | 44 ++++++++++++++++++++++-------------------- > > > > xen/arch/arm/vgic.c | 18 ++++++++-------- > > > > xen/arch/arm/vpl011.c | 6 ++-- > > > > xen/arch/arm/vtimer.c | 6 ++-- > > > > xen/include/asm-arm/regs.h | 2 +- > > > > xen/include/asm-arm/types.h | 4 +++ > > > > 8 files changed, 45 insertions(+), 39 deletions(-) > > > > > > > > diff --git a/xen/arch/arm/domain_build.c b/xen/arch/arm/domain_build.c > > > > index 7403f1a..30d014a 100644 > > > > --- a/xen/arch/arm/domain_build.c > > > > +++ b/xen/arch/arm/domain_build.c > > > > @@ -268,7 +268,7 @@ static int prepare_dtb(struct domain *d, struct kernel_info *kinfo) > > > > > > > > static void dtb_load(struct kernel_info *kinfo) > > > > { > > > > - void * __user dtb_virt = (void *)(u32)kinfo->dtb_paddr; > > > > + void * __user dtb_virt = (void *)(register_t)kinfo->dtb_paddr; > > > > > > > > raw_copy_to_guest(dtb_virt, kinfo->fdt, fdt_totalsize(kinfo->fdt)); > > > > xfree(kinfo->fdt); > > > > diff 
--git a/xen/arch/arm/smpboot.c b/xen/arch/arm/smpboot.c > > > > index 86379b7..d8eb5d3 100644 > > > > --- a/xen/arch/arm/smpboot.c > > > > +++ b/xen/arch/arm/smpboot.c > > > > @@ -142,7 +142,7 @@ void __cpuinit start_secondary(unsigned long boot_phys_offset, > > > > set_processor_id(cpuid); > > > > > > > > /* Setup Hyp vector base */ > > > > - WRITE_CP32((uint32_t) hyp_traps_vector, HVBAR); > > > > + WRITE_CP32((register_t) hyp_traps_vector, HVBAR); > > > > > > > > mmu_init_secondary_cpu(); > > > > enable_vfp(); > > > > diff --git a/xen/arch/arm/traps.c b/xen/arch/arm/traps.c > > > > index eaf1f52..0299b33 100644 > > > > --- a/xen/arch/arm/traps.c > > > > +++ b/xen/arch/arm/traps.c > > > > @@ -68,7 +68,7 @@ static void print_xen_info(void) > > > > debug_build() ? ''y'' : ''n'', print_tainted(taint_str)); > > > > } > > > > > > > > -uint32_t *select_user_reg(struct cpu_user_regs *regs, int reg) > > > > +register_t *select_user_reg(struct cpu_user_regs *regs, int reg) > > > > { > > > > BUG_ON( !guest_mode(regs) ); > > > > > > > > @@ -81,20 +81,20 @@ uint32_t *select_user_reg(struct cpu_user_regs *regs, int reg) > > > > > > > > switch ( reg ) { > > > > case 0 ... 7: /* Unbanked registers */ > > > > - BUILD_BUG_ON(REGOFFS(r0) + 7*sizeof(uint32_t) != REGOFFS(r7)); > > > > + BUILD_BUG_ON(REGOFFS(r0) + 7*sizeof(register_t) != REGOFFS(r7)); > > > > return ®s->r0 + reg; > > > > case 8 ... 12: /* Register banked in FIQ mode */ > > > > - BUILD_BUG_ON(REGOFFS(r8_fiq) + 4*sizeof(uint32_t) != REGOFFS(r12_fiq)); > > > > + BUILD_BUG_ON(REGOFFS(r8_fiq) + 4*sizeof(register_t) != REGOFFS(r12_fiq)); > > > > if ( fiq_mode(regs) ) > > > > return ®s->r8_fiq + reg - 8; > > > > else > > > > return ®s->r8 + reg - 8; > > > > case 13 ... 
14: /* Banked SP + LR registers */ > > > > - BUILD_BUG_ON(REGOFFS(sp_fiq) + 1*sizeof(uint32_t) != REGOFFS(lr_fiq)); > > > > - BUILD_BUG_ON(REGOFFS(sp_irq) + 1*sizeof(uint32_t) != REGOFFS(lr_irq)); > > > > - BUILD_BUG_ON(REGOFFS(sp_svc) + 1*sizeof(uint32_t) != REGOFFS(lr_svc)); > > > > - BUILD_BUG_ON(REGOFFS(sp_abt) + 1*sizeof(uint32_t) != REGOFFS(lr_abt)); > > > > - BUILD_BUG_ON(REGOFFS(sp_und) + 1*sizeof(uint32_t) != REGOFFS(lr_und)); > > > > + BUILD_BUG_ON(REGOFFS(sp_fiq) + 1*sizeof(register_t) != REGOFFS(lr_fiq)); > > > > + BUILD_BUG_ON(REGOFFS(sp_irq) + 1*sizeof(register_t) != REGOFFS(lr_irq)); > > > > + BUILD_BUG_ON(REGOFFS(sp_svc) + 1*sizeof(register_t) != REGOFFS(lr_svc)); > > > > + BUILD_BUG_ON(REGOFFS(sp_abt) + 1*sizeof(register_t) != REGOFFS(lr_abt)); > > > > + BUILD_BUG_ON(REGOFFS(sp_und) + 1*sizeof(register_t) != REGOFFS(lr_und)); > > > > switch ( regs->cpsr & PSR_MODE_MASK ) > > > > { > > > > case PSR_MODE_USR: > > > > @@ -315,11 +315,11 @@ static void show_guest_stack(struct cpu_user_regs *regs) > > > > printk("GUEST STACK GOES HERE\n"); > > > > } > > > > > > > > -#define STACK_BEFORE_EXCEPTION(regs) ((uint32_t*)(regs)->sp) > > > > +#define STACK_BEFORE_EXCEPTION(regs) ((register_t*)(regs)->sp) > > > > > > > > static void show_trace(struct cpu_user_regs *regs) > > > > { > > > > - uint32_t *frame, next, addr, low, high; > > > > + register_t *frame, next, addr, low, high; > > > > > > > > printk("Xen call trace:\n "); > > > > > > > > @@ -327,7 +327,7 @@ static void show_trace(struct cpu_user_regs *regs) > > > > print_symbol(" %s\n ", regs->pc); > > > > > > > > /* Bounds for range of valid frame pointer. 
*/ > > > > - low = (uint32_t)(STACK_BEFORE_EXCEPTION(regs)/* - 2*/); > > > > + low = (register_t)(STACK_BEFORE_EXCEPTION(regs)/* - 2*/); > > > > high = (low & ~(STACK_SIZE - 1)) + > > > > (STACK_SIZE - sizeof(struct cpu_info)); > > > > > > > > @@ -356,7 +356,7 @@ static void show_trace(struct cpu_user_regs *regs) > > > > break; > > > > { > > > > /* Ordinary stack frame. */ > > > > - frame = (uint32_t *)next; > > > > + frame = (register_t *)next; > > > > next = frame[-1]; > > > > addr = frame[0]; > > > > } > > > > @@ -364,7 +364,7 @@ static void show_trace(struct cpu_user_regs *regs) > > > > printk("[<%p>]", _p(addr)); > > > > print_symbol(" %s\n ", addr); > > > > > > > > - low = (uint32_t)&frame[1]; > > > > + low = (register_t)&frame[1]; > > > > } > > > > > > > > printk("\n"); > > > > @@ -372,7 +372,7 @@ static void show_trace(struct cpu_user_regs *regs) > > > > > > > > void show_stack(struct cpu_user_regs *regs) > > > > { > > > > - uint32_t *stack = STACK_BEFORE_EXCEPTION(regs), addr; > > > > + register_t *stack = STACK_BEFORE_EXCEPTION(regs), addr; > > > > int i; > > > > > > > > if ( guest_mode(regs) ) > > > > @@ -486,20 +486,22 @@ static arm_hypercall_t arm_hypercall_table[] = { > > > > > > > > static void do_debug_trap(struct cpu_user_regs *regs, unsigned int code) > > > > { > > > > - uint32_t reg, *r; > > > > + register_t *r; > > > > + uint32_t reg; > > > > uint32_t domid = current->domain->domain_id; > > > > switch ( code ) { > > > > case 0xe0 ... 
0xef: > > > > reg = code - 0xe0; > > > > r = select_user_reg(regs, reg); > > > > - printk("DOM%d: R%d = %#010"PRIx32" at %#010"PRIx32"\n", > > > > + printk("DOM%d: R%d = 0x%"PRIregister" at 0x%"PRIvaddr"\n", > > > > domid, reg, *r, regs->pc); > > > > break; > > > > case 0xfd: > > > > - printk("DOM%d: Reached %#010"PRIx32"\n", domid, regs->pc); > > > > + printk("DOM%d: Reached %"PRIvaddr"\n", domid, regs->pc); > > > > break; > > > > case 0xfe: > > > > - printk("%c", (char)(regs->r0 & 0xff)); > > > > + r = select_user_reg(regs, 0); > > > > + printk("%c", (char)(*r & 0xff)); > > > > break; > > > > case 0xff: > > > > printk("DOM%d: DEBUG\n", domid); > > > > @@ -561,7 +563,7 @@ static void do_cp15_32(struct cpu_user_regs *regs, > > > > union hsr hsr) > > > > { > > > > struct hsr_cp32 cp32 = hsr.cp32; > > > > - uint32_t *r = select_user_reg(regs, cp32.reg); > > > > + uint32_t *r = (uint32_t*)select_user_reg(regs, cp32.reg); > > > > > > > > if ( !cp32.ccvalid ) { > > > > dprintk(XENLOG_ERR, "cp_15(32): need to handle invalid condition codes\n"); > > > > @@ -607,7 +609,7 @@ static void do_cp15_32(struct cpu_user_regs *regs, > > > > BUG_ON(!vtimer_emulate(regs, hsr)); > > > > break; > > > > default: > > > > - printk("%s p15, %d, r%d, cr%d, cr%d, %d @ %#08x\n", > > > > + printk("%s p15, %d, r%d, cr%d, cr%d, %d @ 0x%"PRIregister"\n", > > > > cp32.read ? "mrc" : "mcr", > > > > cp32.op1, cp32.reg, cp32.crn, cp32.crm, cp32.op2, regs->pc); > > > > panic("unhandled 32-bit CP15 access %#x\n", hsr.bits & HSR_CP32_REGS_MASK); > > > > @@ -637,7 +639,7 @@ static void do_cp15_64(struct cpu_user_regs *regs, > > > > BUG_ON(!vtimer_emulate(regs, hsr)); > > > > break; > > > > default: > > > > - printk("%s p15, %d, r%d, r%d, cr%d @ %#08x\n", > > > > + printk("%s p15, %d, r%d, r%d, cr%d @ 0x%"PRIregister"\n", > > > > cp64.read ? 
"mrrc" : "mcrr", > > > > cp64.op1, cp64.reg1, cp64.reg2, cp64.crm, regs->pc); > > > > panic("unhandled 64-bit CP15 access %#x\n", hsr.bits & HSR_CP64_REGS_MASK); > > > > diff --git a/xen/arch/arm/vgic.c b/xen/arch/arm/vgic.c > > > > index 39b9775..57147d5 100644 > > > > --- a/xen/arch/arm/vgic.c > > > > +++ b/xen/arch/arm/vgic.c > > > > @@ -160,7 +160,7 @@ static int vgic_distr_mmio_read(struct vcpu *v, mmio_info_t *info) > > > > { > > > > struct hsr_dabt dabt = info->dabt; > > > > struct cpu_user_regs *regs = guest_cpu_user_regs(); > > > > - uint32_t *r = select_user_reg(regs, dabt.reg); > > > > + register_t *r = select_user_reg(regs, dabt.reg); > > > > struct vgic_irq_rank *rank; > > > > int offset = (int)(info->gpa - VGIC_DISTR_BASE_ADDRESS); > > > > int gicd_reg = REG(offset); > > > > @@ -372,7 +372,7 @@ static int vgic_distr_mmio_write(struct vcpu *v, mmio_info_t *info) > > > > { > > > > struct hsr_dabt dabt = info->dabt; > > > > struct cpu_user_regs *regs = guest_cpu_user_regs(); > > > > - uint32_t *r = select_user_reg(regs, dabt.reg); > > > > + register_t *r = select_user_reg(regs, dabt.reg); > > > > struct vgic_irq_rank *rank; > > > > int offset = (int)(info->gpa - VGIC_DISTR_BASE_ADDRESS); > > > > int gicd_reg = REG(offset); > > > > @@ -421,13 +421,13 @@ static int vgic_distr_mmio_write(struct vcpu *v, mmio_info_t *info) > > > > > > > > case GICD_ISPENDR ... GICD_ISPENDRN: > > > > if ( dabt.size != 0 && dabt.size != 2 ) goto bad_width; > > > > - printk("vGICD: unhandled %s write %#"PRIx32" to ISPENDR%d\n", > > > > + printk("vGICD: unhandled %s write %#"PRIregister" to ISPENDR%d\n", > > > > dabt.size ? "word" : "byte", *r, gicd_reg - GICD_ISPENDR); > > > > return 0; > > > > > > > > case GICD_ICPENDR ... GICD_ICPENDRN: > > > > if ( dabt.size != 0 && dabt.size != 2 ) goto bad_width; > > > > - printk("vGICD: unhandled %s write %#"PRIx32" to ICPENDR%d\n", > > > > + printk("vGICD: unhandled %s write %#"PRIregister" to ICPENDR%d\n", > > > > dabt.size ? 
"word" : "byte", *r, gicd_reg - GICD_ICPENDR); > > > > return 0; > > > > > > > > @@ -499,19 +499,19 @@ static int vgic_distr_mmio_write(struct vcpu *v, mmio_info_t *info) > > > > > > > > case GICD_SGIR: > > > > if ( dabt.size != 2 ) goto bad_width; > > > > - printk("vGICD: unhandled write %#"PRIx32" to ICFGR%d\n", > > > > + printk("vGICD: unhandled write %#"PRIregister" to ICFGR%d\n", > > > > *r, gicd_reg - GICD_ICFGR); > > > > return 0; > > > > > > > > case GICD_CPENDSGIR ... GICD_CPENDSGIRN: > > > > if ( dabt.size != 0 && dabt.size != 2 ) goto bad_width; > > > > - printk("vGICD: unhandled %s write %#"PRIx32" to ICPENDSGIR%d\n", > > > > + printk("vGICD: unhandled %s write %#"PRIregister" to ICPENDSGIR%d\n", > > > > dabt.size ? "word" : "byte", *r, gicd_reg - GICD_CPENDSGIR); > > > > return 0; > > > > > > > > case GICD_SPENDSGIR ... GICD_SPENDSGIRN: > > > > if ( dabt.size != 0 && dabt.size != 2 ) goto bad_width; > > > > - printk("vGICD: unhandled %s write %#"PRIx32" to ISPENDSGIR%d\n", > > > > + printk("vGICD: unhandled %s write %#"PRIregister" to ISPENDSGIR%d\n", > > > > dabt.size ? 
"word" : "byte", *r, gicd_reg - GICD_SPENDSGIR); > > > > return 0; > > > > > > > > @@ -537,13 +537,13 @@ static int vgic_distr_mmio_write(struct vcpu *v, mmio_info_t *info) > > > > goto write_ignore; > > > > > > > > default: > > > > - printk("vGICD: unhandled write r%d=%"PRIx32" offset %#08x\n", > > > > + printk("vGICD: unhandled write r%d=%"PRIregister" offset %#08x\n", > > > > dabt.reg, *r, offset); > > > > return 0; > > > > } > > > > > > > > bad_width: > > > > - printk("vGICD: bad write width %d r%d=%"PRIx32" offset %#08x\n", > > > > + printk("vGICD: bad write width %d r%d=%"PRIregister" offset %#08x\n", > > > > dabt.size, dabt.reg, *r, offset); > > > > domain_crash_synchronous(); > > > > return 0; > > > > diff --git a/xen/arch/arm/vpl011.c b/xen/arch/arm/vpl011.c > > > > index 7dcee90..db5094e 100644 > > > > --- a/xen/arch/arm/vpl011.c > > > > +++ b/xen/arch/arm/vpl011.c > > > > @@ -92,7 +92,7 @@ static int uart0_mmio_read(struct vcpu *v, mmio_info_t *info) > > > > { > > > > struct hsr_dabt dabt = info->dabt; > > > > struct cpu_user_regs *regs = guest_cpu_user_regs(); > > > > - uint32_t *r = select_user_reg(regs, dabt.reg); > > > > + register_t *r = select_user_reg(regs, dabt.reg); > > > > int offset = (int)(info->gpa - UART0_START); > > > > > > > > switch ( offset ) > > > > @@ -114,7 +114,7 @@ static int uart0_mmio_write(struct vcpu *v, mmio_info_t *info) > > > > { > > > > struct hsr_dabt dabt = info->dabt; > > > > struct cpu_user_regs *regs = guest_cpu_user_regs(); > > > > - uint32_t *r = select_user_reg(regs, dabt.reg); > > > > + register_t *r = select_user_reg(regs, dabt.reg); > > > > int offset = (int)(info->gpa - UART0_START); > > > > > > > > switch ( offset ) > > > > @@ -127,7 +127,7 @@ static int uart0_mmio_write(struct vcpu *v, mmio_info_t *info) > > > > /* Silently ignore */ > > > > return 1; > > > > default: > > > > - printk("VPL011: unhandled write r%d=%"PRIx32" offset %#08x\n", > > > > + printk("VPL011: unhandled write r%d=%"PRIregister" offset 
%#08x\n", > > > > dabt.reg, *r, offset); > > > > domain_crash_synchronous(); > > > > } > > > > diff --git a/xen/arch/arm/vtimer.c b/xen/arch/arm/vtimer.c > > > > index 85201b5..291b87e 100644 > > > > --- a/xen/arch/arm/vtimer.c > > > > +++ b/xen/arch/arm/vtimer.c > > > > @@ -99,7 +99,7 @@ static int vtimer_emulate_32(struct cpu_user_regs *regs, union hsr hsr) > > > > { > > > > struct vcpu *v = current; > > > > struct hsr_cp32 cp32 = hsr.cp32; > > > > - uint32_t *r = select_user_reg(regs, cp32.reg); > > > > + uint32_t *r = (uint32_t *)select_user_reg(regs, cp32.reg); > > > > s_time_t now; > > > > > > > > switch ( hsr.bits & HSR_CP32_REGS_MASK ) > > > > @@ -151,8 +151,8 @@ static int vtimer_emulate_64(struct cpu_user_regs *regs, union hsr hsr) > > > > { > > > > struct vcpu *v = current; > > > > struct hsr_cp64 cp64 = hsr.cp64; > > > > - uint32_t *r1 = select_user_reg(regs, cp64.reg1); > > > > - uint32_t *r2 = select_user_reg(regs, cp64.reg2); > > > > + uint32_t *r1 = (uint32_t *)select_user_reg(regs, cp64.reg1); > > > > + uint32_t *r2 = (uint32_t *)select_user_reg(regs, cp64.reg2); > > > > uint64_t ticks; > > > > s_time_t now; > > > > > > > > diff --git a/xen/include/asm-arm/regs.h b/xen/include/asm-arm/regs.h > > > > index 7486944..a723f92 100644 > > > > --- a/xen/include/asm-arm/regs.h > > > > +++ b/xen/include/asm-arm/regs.h > > > > @@ -34,7 +34,7 @@ > > > > * Returns a pointer to the given register value in regs, taking the > > > > * processor mode (CPSR) into account. 
> > > > */ > > > > -extern uint32_t *select_user_reg(struct cpu_user_regs *regs, int reg); > > > > +extern register_t *select_user_reg(struct cpu_user_regs *regs, int reg); > > > > > > > > #endif /* __ARM_REGS_H__ */ > > > > /* > > > > diff --git a/xen/include/asm-arm/types.h b/xen/include/asm-arm/types.h > > > > index d3e16d8..9ca32f1 100644 > > > > --- a/xen/include/asm-arm/types.h > > > > +++ b/xen/include/asm-arm/types.h > > > > @@ -41,6 +41,8 @@ typedef u32 vaddr_t; > > > > typedef u64 paddr_t; > > > > #define INVALID_PADDR (~0ULL) > > > > #define PRIpaddr "016llx" > > > > +typedef u32 register_t; > > > > +#define PRIregister "x" > > > > #elif defined (CONFIG_ARM_64) > > > > typedef signed long s64; > > > > typedef unsigned long u64; > > > > @@ -49,6 +51,8 @@ typedef u64 vaddr_t; > > > > typedef u64 paddr_t; > > > > #define INVALID_PADDR (~0UL) > > > > #define PRIpaddr "016lx" > > > > +typedef u64 register_t; > > > > +#define PRIregister "lx" > > > > #endif > > > > > > > > typedef unsigned long size_t; > > > > -- > > > > 1.7.2.5 > > > > > > > > > > > > > > > >
At 12:06 +0000 on 15 Feb (1360930007), Ian Campbell wrote:> On Thu, 2013-02-14 at 16:47 +0000, Ian Campbell wrote: > > You can also run 32-bit on the V8 model (using -C > > cluster.cpu0.CONFIG64=0) if you comment out the ThumbEE in > > ctxt_switch_from and ctxt_switch_to (making this dynamic is on my TODO > > list). > > 8<----------------------------------------------- > > From e45c4e4f45e72e404052629c619af8810dadd76f Mon Sep 17 00:00:00 2001 > From: Ian Campbell <ian.campbell@citrix.com> > Date: Fri, 15 Feb 2013 10:30:48 +0000 > Subject: [PATCH] xen: arm: implement cpuinfo > > Use to: > > - Only context switch ThumbEE state if the processor implements it. In > particular the ARMv8 FastModels do not. > - Detect the generic timer, and therefore call identify_cpu before > init_xen_time. > > Also improve the boot time messages a bit. > > I haven't added decoding for all of the CPUID words, it seems like overkill > for the moment. > > Signed-off-by: Ian Campbell <ian.campbell@citrix.com>Acked-by: Tim Deegan <tim@xen.org>