This series has been rebased (on top of my SMP one, also just reposted) and acks have been accumulated. I've replied to the feedback but there weren't any changes left to make after I had done so. From a release perspective this only touches ARM code and I think 64-bit support (even if only dom0) is worth the risk to 32-bit ARM at this stage in the freeze. Ian.
Correct definition of UREGS_kernel_sizeof and use it. Correct adjustment of stack on entry and exit. Add 64-bit versions of the build time checks for stack pointer alignment correctness when pushing the stack frames. Lastly, correct the padding in the stack frames to properly align the inner and outer frames and also avoid an unnecessary 64bit padding field. Signed-off-by: Ian Campbell <ian.campbell@citrix.com> --- xen/arch/arm/arm64/asm-offsets.c | 2 +- xen/arch/arm/arm64/entry.S | 9 +++++---- xen/arch/arm/domain.c | 2 ++ xen/arch/arm/traps.c | 7 +++++++ xen/include/asm-arm/arm64/processor.h | 7 +++---- 5 files changed, 18 insertions(+), 9 deletions(-) diff --git a/xen/arch/arm/arm64/asm-offsets.c b/xen/arch/arm/arm64/asm-offsets.c index 7949e3e..7544082 100644 --- a/xen/arch/arm/arm64/asm-offsets.c +++ b/xen/arch/arm/arm64/asm-offsets.c @@ -39,7 +39,7 @@ void __dummy__(void) OFFSET(UREGS_SP_el1, struct cpu_user_regs, sp_el1); OFFSET(UREGS_ELR_el1, struct cpu_user_regs, elr_el1); - OFFSET(UREGS_kernel_sizeof, struct cpu_user_regs, cpsr); + OFFSET(UREGS_kernel_sizeof, struct cpu_user_regs, spsr_el1); DEFINE(UREGS_user_sizeof, sizeof(struct cpu_user_regs)); BLANK(); diff --git a/xen/arch/arm/arm64/entry.S b/xen/arch/arm/arm64/entry.S index 5656f45..b5af1e2 100644 --- a/xen/arch/arm/arm64/entry.S +++ b/xen/arch/arm/arm64/entry.S @@ -35,7 +35,7 @@ lr .req x30 // link register mrs x22, SP_el0 str x22, [x21] - add x21, sp, #UREGS_ELR_el1 + add x21, sp, #UREGS_SP_el1 mrs x22, SP_el1 mrs x23, ELR_el1 stp x22, x23, [x21] @@ -60,7 +60,7 @@ lr .req x30 // link register * Save state on entry to hypervisor */ .macro entry, hyp, compat - sub sp, sp, #(UREGS_SPSR_el1 - UREGS_SP) + sub sp, sp, #(UREGS_SPSR_el1 - UREGS_LR) /* CPSR, PC, SP, LR */ push x28, x29 push x26, x27 push x24, x25 @@ -79,7 +79,7 @@ lr .req x30 // link register .if \hyp == 1 /* Hypervisor mode */ - add x21, sp, #(UREGS_X0 - UREGS_SP) + add x21, sp, #UREGS_kernel_sizeof .else /* Guest mode */ @@ -214,7 +214,8 
@@ ENTRY(return_to_hypervisor) pop x26, x27 pop x28, x29 - ldr lr, [sp], #(UREGS_SPSR_el1 - UREGS_SP) + ldr lr, [sp], #(UREGS_SPSR_el1 - UREGS_LR) /* CPSR, PC, SP, LR */ + eret /* diff --git a/xen/arch/arm/domain.c b/xen/arch/arm/domain.c index f369871..08bb132 100644 --- a/xen/arch/arm/domain.c +++ b/xen/arch/arm/domain.c @@ -419,6 +419,8 @@ int vcpu_initialise(struct vcpu *v) { int rc = 0; + BUILD_BUG_ON( sizeof(struct cpu_info) > STACK_SIZE ); + v->arch.stack = alloc_xenheap_pages(STACK_ORDER, MEMF_node(vcpu_to_node(v))); if ( v->arch.stack == NULL ) return -ENOMEM; diff --git a/xen/arch/arm/traps.c b/xen/arch/arm/traps.c index a98a45e..6b19bc5 100644 --- a/xen/arch/arm/traps.c +++ b/xen/arch/arm/traps.c @@ -43,9 +43,16 @@ * entry.S) and struct cpu_info (which lives at the bottom of a Xen * stack) must be doubleword-aligned in size. */ static inline void check_stack_alignment_constraints(void) { +#ifdef CONFIG_ARM_64 + BUILD_BUG_ON((sizeof (struct cpu_user_regs)) & 0xf); + BUILD_BUG_ON((offsetof(struct cpu_user_regs, spsr_el1)) & 0xf); + BUILD_BUG_ON((offsetof(struct cpu_user_regs, lr)) & 0xf); + BUILD_BUG_ON((sizeof (struct cpu_info)) & 0xf); +#else BUILD_BUG_ON((sizeof (struct cpu_user_regs)) & 0x7); BUILD_BUG_ON((offsetof(struct cpu_user_regs, sp_usr)) & 0x7); BUILD_BUG_ON((sizeof (struct cpu_info)) & 0x7); +#endif } static int debug_stack_lines = 20; diff --git a/xen/include/asm-arm/arm64/processor.h b/xen/include/asm-arm/arm64/processor.h index b4602fa..bf436c8 100644 --- a/xen/include/asm-arm/arm64/processor.h +++ b/xen/include/asm-arm/arm64/processor.h @@ -51,6 +51,7 @@ struct cpu_user_regs __DECL_REG(x27, r11_fiq); __DECL_REG(x28, r12_fiq); __DECL_REG(/* x29 */ fp, /* r13_fiq */ sp_fiq); + __DECL_REG(/* x30 */ lr, /* r14_fiq */ lr_fiq); register_t sp; /* Valid for hypervisor frames */ @@ -59,7 +60,7 @@ struct cpu_user_regs __DECL_REG(pc, pc32); /* ELR_EL2 */ uint32_t cpsr; /* SPSR_EL2 */ - uint64_t pad0; + uint32_t pad0; /* Align end of kernel frame. 
*/ /* Outer guest frame only from here on... */ @@ -68,7 +69,7 @@ struct cpu_user_regs uint32_t spsr_svc; /* AArch32 */ }; - uint32_t pad1; /* Align */ + uint32_t pad1; /* Doubleword-align the user half of the frame */ /* AArch32 guests only */ uint32_t spsr_fiq, spsr_irq, spsr_und, spsr_abt; @@ -76,8 +77,6 @@ struct cpu_user_regs /* AArch64 guests only */ uint64_t sp_el0; uint64_t sp_el1, elr_el1; - - uint64_t pad2; /* Doubleword-align the user half of the frame */ }; #undef __DECL_REG -- 1.7.2.5
Ian Campbell
2013-Apr-17 13:16 UTC
[PATCH 02/13] xen: arm: rename 32-bit specific zImage field offset constants
This will help avoid confusion when 64-bit Image support is added. Signed-off-by: Ian Campbell <ian.campbell@citrix.com> --- xen/arch/arm/kernel.c | 28 ++++++++++++++-------------- 1 files changed, 14 insertions(+), 14 deletions(-) diff --git a/xen/arch/arm/kernel.c b/xen/arch/arm/kernel.c index 8f4a60d..e7f34a4 100644 --- a/xen/arch/arm/kernel.c +++ b/xen/arch/arm/kernel.c @@ -19,12 +19,12 @@ #define KERNEL_FLASH_ADDRESS 0x00000000UL #define KERNEL_FLASH_SIZE 0x00800000UL -#define ZIMAGE_MAGIC_OFFSET 0x24 -#define ZIMAGE_START_OFFSET 0x28 -#define ZIMAGE_END_OFFSET 0x2c -#define ZIMAGE_HEADER_LEN 0x30 +#define ZIMAGE32_MAGIC_OFFSET 0x24 +#define ZIMAGE32_START_OFFSET 0x28 +#define ZIMAGE32_END_OFFSET 0x2c +#define ZIMAGE32_HEADER_LEN 0x30 -#define ZIMAGE_MAGIC 0x016f2818 +#define ZIMAGE32_MAGIC 0x016f2818 struct minimal_dtb_header { uint32_t magic; @@ -99,26 +99,26 @@ static void kernel_zimage_load(struct kernel_info *info) } } -/** - * Check the image is a zImage and return the load address and length +/* + * Check if the image is a 32-bit zImage and setup kernel_info */ -static int kernel_try_zimage_prepare(struct kernel_info *info, +static int kernel_try_zimage32_prepare(struct kernel_info *info, paddr_t addr, paddr_t size) { - uint32_t zimage[ZIMAGE_HEADER_LEN/4]; + uint32_t zimage[ZIMAGE32_HEADER_LEN/4]; uint32_t start, end; struct minimal_dtb_header dtb_hdr; - if ( size < ZIMAGE_HEADER_LEN ) + if ( size < ZIMAGE32_HEADER_LEN ) return -EINVAL; copy_from_paddr(zimage, addr, sizeof(zimage), DEV_SHARED); - if (zimage[ZIMAGE_MAGIC_OFFSET/4] != ZIMAGE_MAGIC) + if (zimage[ZIMAGE32_MAGIC_OFFSET/4] != ZIMAGE32_MAGIC) return -EINVAL; - start = zimage[ZIMAGE_START_OFFSET/4]; - end = zimage[ZIMAGE_END_OFFSET/4]; + start = zimage[ZIMAGE32_START_OFFSET/4]; + end = zimage[ZIMAGE32_END_OFFSET/4]; if ( (end - start) > size ) return -EINVAL; @@ -224,7 +224,7 @@ int kernel_prepare(struct kernel_info *info) info->load_attr = BUFFERABLE; } - rc = kernel_try_zimage_prepare(info, 
start, size); + rc = kernel_try_zimage32_prepare(info, start, size); if (rc < 0) rc = kernel_try_elf_prepare(info, start, size); -- 1.7.2.5
Ian Campbell
2013-Apr-17 13:16 UTC
[PATCH 03/13] xen: arm: support for loading 64-bit zImage dom0
This is defined in linux/Documentation/arm64/booting.txt. Signed-off-by: Ian Campbell <ian.campbell@citrix.com> --- xen/arch/arm/kernel.c | 80 +++++++++++++++++++++++++++++++++++++++++++++--- 1 files changed, 75 insertions(+), 5 deletions(-) diff --git a/xen/arch/arm/kernel.c b/xen/arch/arm/kernel.c index e7f34a4..59844b0 100644 --- a/xen/arch/arm/kernel.c +++ b/xen/arch/arm/kernel.c @@ -26,6 +26,8 @@ #define ZIMAGE32_MAGIC 0x016f2818 +#define ZIMAGE64_MAGIC 0x14000008 + struct minimal_dtb_header { uint32_t magic; uint32_t total_size; @@ -99,6 +101,57 @@ static void kernel_zimage_load(struct kernel_info *info) } } +#ifdef CONFIG_ARM_64 +/* + * Check if the image is a 64-bit zImage and setup kernel_info + */ +static int kernel_try_zimage64_prepare(struct kernel_info *info, + paddr_t addr, paddr_t size) +{ + /* linux/Documentation/arm64/booting.txt */ + struct { + uint32_t magic; + uint32_t res0; + uint64_t text_offset; /* Image load offset */ + uint64_t res1; + uint64_t res2; + } zimage; + uint64_t start, end; + + if ( size < sizeof(zimage) ) + return -EINVAL; + + copy_from_paddr(&zimage, addr, sizeof(zimage), DEV_SHARED); + + if (zimage.magic != ZIMAGE64_MAGIC) + return -EINVAL; + + /* Currently there is no length in the header, so just use the size */ + start = 0; + end = size; + + /* + * Given the above this check is a bit pointless, but leave it + * here in case someone adds a length field in the future. 
+ */ + if ( (end - start) > size ) + return -EINVAL; + + info->zimage.kernel_addr = addr; + + info->zimage.load_addr = info->mem.bank[0].start + + zimage.text_offset; + info->zimage.len = end - start; + + info->entry = info->zimage.load_addr; + info->load = kernel_zimage_load; + + info->type = DOMAIN_PV64; + + return 0; +} +#endif + /* * Check if the image is a 32-bit zImage and setup kernel_info */ @@ -153,6 +206,10 @@ static int kernel_try_zimage32_prepare(struct kernel_info *info, info->entry = info->zimage.load_addr; info->load = kernel_zimage_load; +#ifdef CONFIG_ARM_64 + info->type = DOMAIN_PV32; +#endif + return 0; } @@ -187,6 +244,19 @@ static int kernel_try_elf_prepare(struct kernel_info *info, if ( (rc = elf_xen_parse(&info->elf.elf, &info->elf.parms)) != 0 ) goto err; +#ifdef CONFIG_ARM_64 + if ( elf_32bit(&info->elf.elf) ) + info->type = DOMAIN_PV32; + else if ( elf_64bit(&info->elf.elf) ) + info->type = DOMAIN_PV64; + else + { + printk("Unknown ELF class\n"); + rc = -EINVAL; + goto err; + } +#endif + /* * TODO: can the ELF header be used to find the physical address * to load the image to? Instead of assuming virt == phys. @@ -224,13 +294,13 @@ int kernel_prepare(struct kernel_info *info) info->load_attr = BUFFERABLE; } - rc = kernel_try_zimage32_prepare(info, start, size); - if (rc < 0) - rc = kernel_try_elf_prepare(info, start, size); - #ifdef CONFIG_ARM_64 - info->type = DOMAIN_PV32; /* No 64-bit guest support yet */ + rc = kernel_try_zimage64_prepare(info, start, size); + if (rc < 0) #endif + rc = kernel_try_zimage32_prepare(info, start, size); + if (rc < 0) + rc = kernel_try_elf_prepare(info, start, size); return rc; } -- 1.7.2.5
Ian Campbell
2013-Apr-17 13:16 UTC
[PATCH 04/13] xen: arm: support building a 64-bit dom0 domain
Signed-off-by: Ian Campbell <ian.campbell@citrix.com> Acked-by: Stefano Stabellini <stefano.stabellini@eu.citrix.com> --- xen/arch/arm/domain_build.c | 10 ++++++++-- 1 files changed, 8 insertions(+), 2 deletions(-) diff --git a/xen/arch/arm/domain_build.c b/xen/arch/arm/domain_build.c index 81a759c..2ba372d 100644 --- a/xen/arch/arm/domain_build.c +++ b/xen/arch/arm/domain_build.c @@ -420,9 +420,8 @@ int construct_dom0(struct domain *d) memset(regs, 0, sizeof(*regs)); - regs->pc = (uint32_t)kinfo.entry; + regs->pc = (register_t)kinfo.entry; - regs->cpsr = PSR_ABT_MASK|PSR_FIQ_MASK|PSR_IRQ_MASK|PSR_MODE_SVC; #ifdef CONFIG_ARM_64 d->arch.type = kinfo.type; @@ -430,6 +429,11 @@ int construct_dom0(struct domain *d) if ( is_pv32_domain(d) ) { + regs->cpsr = PSR_ABT_MASK|PSR_FIQ_MASK|PSR_IRQ_MASK|PSR_MODE_SVC; + + /* Pretend to be a Cortex A15 */ + d->arch.vpidr = 0x410fc0f0; + /* FROM LINUX head.S * * Kernel startup entry point. @@ -447,6 +451,8 @@ int construct_dom0(struct domain *d) #ifdef CONFIG_ARM_64 else { + regs->cpsr = PSR_ABT_MASK|PSR_FIQ_MASK|PSR_IRQ_MASK|PSR_MODE_EL1h; + /* From linux/Documentation/arm64/booting.txt */ regs->x0 = kinfo.dtb_paddr; regs->x1 = 0; /* Reserved for future use */ -- 1.7.2.5
Ian Campbell
2013-Apr-17 13:16 UTC
[PATCH 05/13] xen: arm: precalculate VTTBR_EL2 for a domain when setting up its p2m
Mostly just to help with upcoming vcpu_show_registers changes. Signed-off-by: Ian Campbell <ian.campbell@citrix.com> Acked-by: Stefano Stabellini <stefano.stabellini@eu.citrix.com> --- xen/arch/arm/p2m.c | 16 +++++++++------- xen/include/asm-arm/domain.h | 3 +++ 2 files changed, 12 insertions(+), 7 deletions(-) diff --git a/xen/arch/arm/p2m.c b/xen/arch/arm/p2m.c index 1e8c8b4..175216c 100644 --- a/xen/arch/arm/p2m.c +++ b/xen/arch/arm/p2m.c @@ -23,13 +23,10 @@ void dump_p2m_lookup(struct domain *d, paddr_t addr) void p2m_load_VTTBR(struct domain *d) { - struct p2m_domain *p2m = &d->arch.p2m; - paddr_t maddr = page_to_maddr(p2m->first_level); - uint64_t vttbr = maddr; - - vttbr |= ((uint64_t)p2m->vmid&0xff)<<48; - - WRITE_SYSREG64(vttbr, VTTBR_EL2); + if ( is_idle_domain(d) ) + return; + BUG_ON(!d->arch.vttbr); + WRITE_SYSREG64(d->arch.vttbr, VTTBR_EL2); isb(); /* Ensure update is visible */ } @@ -300,6 +297,9 @@ int p2m_alloc_table(struct domain *d) p2m->first_level = page; + d->arch.vttbr = page_to_maddr(p2m->first_level) + | ((uint64_t)p2m->vmid&0xff)<<48; + spin_unlock(&p2m->lock); return 0; @@ -331,6 +331,8 @@ int p2m_init(struct domain *d) /* Zero is reserved */ p2m->vmid = d->domain_id + 1; + d->arch.vttbr = 0; + p2m->first_level = NULL; return 0; diff --git a/xen/include/asm-arm/domain.h b/xen/include/asm-arm/domain.h index 3fa266c2..f26b103 100644 --- a/xen/include/asm-arm/domain.h +++ b/xen/include/asm-arm/domain.h @@ -53,7 +53,10 @@ struct arch_domain enum domain_type type; #endif + /* Virtual MMU */ struct p2m_domain p2m; + uint64_t vttbr; + struct hvm_domain hvm_domain; xen_pfn_t *grant_table_gpfn; -- 1.7.2.5
Ian Campbell
2013-Apr-17 13:16 UTC
[PATCH 06/13] xen: arm: improve register dump output for 64-bit guest (and more generally too)
Signed-off-by: Ian Campbell <ian.campbell@citrix.com> --- xen/arch/arm/traps.c | 171 +++++++++++++++++++++++++++-------------- xen/include/asm-arm/cpregs.h | 1 + 2 files changed, 113 insertions(+), 59 deletions(-) diff --git a/xen/arch/arm/traps.c b/xen/arch/arm/traps.c index 6b19bc5..2f4e157 100644 --- a/xen/arch/arm/traps.c +++ b/xen/arch/arm/traps.c @@ -238,7 +238,7 @@ void panic_PAR(uint64_t par) msg = decode_fsc( (par&PAR_FSC_MASK) >> PAR_FSC_SHIFT, &level); - printk("PAR: %010"PRIx64": %s stage %d%s%s\n", + printk("PAR: %016"PRIx64": %s stage %d%s%s\n", par, msg, stage, second_in_first ? " during second stage lookup" : "", @@ -248,33 +248,60 @@ void panic_PAR(uint64_t par) } struct reg_ctxt { - uint32_t sctlr, tcr; - uint64_t ttbr0, ttbr1; + /* Guest-side state */ + uint32_t sctlr_el1, tcr_el1; + uint64_t ttbr0_el1, ttbr1_el1; #ifdef CONFIG_ARM_32 + uint32_t dfsr, ifsr; uint32_t dfar, ifar; #else + uint32_t esr_el1; uint64_t far; + uint32_t ifsr32_el2; #endif + + /* Hypervisor-side state */ + uint64_t vttbr_el2; }; +static const char *mode_string(uint32_t cpsr) +{ + uint32_t mode; + static const char *mode_strings[] = { + [PSR_MODE_USR] = "32-bit Guest USR", + [PSR_MODE_FIQ] = "32-bit Guest FIQ", + [PSR_MODE_IRQ] = "32-bit Guest IRQ", + [PSR_MODE_SVC] = "32-bit Guest SVC", + [PSR_MODE_MON] = "32-bit Monitor", + [PSR_MODE_ABT] = "32-bit Guest ABT", + [PSR_MODE_HYP] = "Hypervisor", + [PSR_MODE_UND] = "32-bit Guest UND", + [PSR_MODE_SYS] = "32-bit Guest SYS", +#ifdef CONFIG_ARM_64 + [PSR_MODE_EL3h] = "64-bit EL3h (Monitor, handler)", + [PSR_MODE_EL3t] = "64-bit EL3t (Monitor, thread)", + [PSR_MODE_EL2h] = "64-bit EL2h (Hypervisor, handler)", + [PSR_MODE_EL2t] = "64-bit EL2t (Hypervisor, thread)", + [PSR_MODE_EL1h] = "64-bit EL1h (Guest Kernel, handler)", + [PSR_MODE_EL1t] = "64-bit EL1t (Guest Kernel, thread)", + [PSR_MODE_EL0t] = "64-bit EL0t (Guest User)", +#endif + }; + mode = cpsr & PSR_MODE_MASK; + + if ( mode > ARRAY_SIZE(mode_strings) ) + return 
"Unknown"; + return mode_strings[mode] ? : "Unknown"; +} + static void show_registers_32(struct cpu_user_regs *regs, struct reg_ctxt *ctxt, int guest_mode, const struct vcpu *v) { - static const char *mode_strings[] = { - [PSR_MODE_USR] = "USR", - [PSR_MODE_FIQ] = "FIQ", - [PSR_MODE_IRQ] = "IRQ", - [PSR_MODE_SVC] = "SVC", - [PSR_MODE_MON] = "MON", - [PSR_MODE_ABT] = "ABT", - [PSR_MODE_HYP] = "HYP", - [PSR_MODE_UND] = "UND", - [PSR_MODE_SYS] = "SYS" - }; #ifdef CONFIG_ARM_64 + BUG_ON( ! (regs->cpsr & PSR_MODE_BIT) ); printk("PC: %08"PRIx32"\n", regs->pc32); #else printk("PC: %08"PRIx32, regs->pc); @@ -282,9 +309,8 @@ static void show_registers_32(struct cpu_user_regs *regs, print_symbol(" %s", regs->pc); printk("\n"); #endif - printk("CPSR: %08"PRIx32" MODE:%s%s\n", regs->cpsr, - guest_mode ? "32-bit Guest " : "Hypervisor", - guest_mode ? mode_strings[regs->cpsr & PSR_MODE_MASK] : ""); + printk("CPSR: %08"PRIx32" MODE:%s\n", regs->cpsr, + mode_string(regs->cpsr)); printk(" R0: %08"PRIx32" R1: %08"PRIx32" R2: %08"PRIx32" R3: %08"PRIx32"\n", regs->r0, regs->r1, regs->r2, regs->r3); printk(" R4: %08"PRIx32" R5: %08"PRIx32" R6: %08"PRIx32" R7: %08"PRIx32"\n", @@ -325,15 +351,19 @@ static void show_registers_32(struct cpu_user_regs *regs, if ( guest_mode ) { - printk("TTBR0 %010"PRIx64" TTBR1 %010"PRIx64" TCR %08"PRIx32"\n", - ctxt->ttbr0, ctxt->ttbr1, ctxt->tcr); - printk("SCTLR %08"PRIx32"\n", ctxt->sctlr); - printk("IFAR %08"PRIx32" DFAR %08"PRIx32"\n", + printk(" SCTLR: %08"PRIx32"\n", ctxt->sctlr_el1); + printk(" TCR: %08"PRIx32"\n", ctxt->tcr_el1); + printk(" TTBR0: %016"PRIx64"\n", ctxt->ttbr0_el1); + printk(" TTBR1: %016"PRIx64"\n", ctxt->ttbr1_el1); + printk(" IFAR: %08"PRIx32", IFSR: %08"PRIx32"\n" + " DFAR: %08"PRIx32", DFSR: %08"PRIx32"\n", #ifdef CONFIG_ARM_64 (uint32_t)(ctxt->far >> 32), - (uint32_t)(ctxt->far & 0xffffffff) + ctxt->ifsr32_el2, + (uint32_t)(ctxt->far & 0xffffffff), + ctxt->esr_el1 #else - ctxt->ifar, ctxt->dfar + ctxt->ifar, ctxt->ifsr, 
ctxt->dfar, ctxt->dfsr #endif ); printk("\n"); @@ -346,13 +376,25 @@ static void show_registers_64(struct cpu_user_regs *regs, int guest_mode, const struct vcpu *v) { + + BUG_ON( (regs->cpsr & PSR_MODE_BIT) ); + printk("PC: %016"PRIx64, regs->pc); if ( !guest_mode ) print_symbol(" %s", regs->pc); printk("\n"); - printk("SP: %08"PRIx64"\n", regs->sp); + printk("LR: %016"PRIx64"\n", regs->lr); + if ( guest_mode ) + { + printk("SP_EL0: %016"PRIx64"\n", regs->sp_el0); + printk("SP_EL1: %016"PRIx64"\n", regs->sp_el1); + } + else + { + printk("SP: %016"PRIx64"\n", regs->sp); + } printk("CPSR: %08"PRIx32" MODE:%s\n", regs->cpsr, - guest_mode ? "64-bit Guest" : "Hypervisor"); + mode_string(regs->cpsr)); printk(" X0: %016"PRIx64" X1: %016"PRIx64" X2: %016"PRIx64"\n", regs->x0, regs->x1, regs->x2); printk(" X3: %016"PRIx64" X4: %016"PRIx64" X5: %016"PRIx64"\n", @@ -371,17 +413,20 @@ static void show_registers_64(struct cpu_user_regs *regs, regs->x21, regs->x22, regs->x23); printk(" X24: %016"PRIx64" X25: %016"PRIx64" X26: %016"PRIx64"\n", regs->x24, regs->x25, regs->x26); - printk(" X27: %016"PRIx64" X28: %016"PRIx64" X29: %016"PRIx64"\n", - regs->x27, regs->x28, regs->lr); + printk(" X27: %016"PRIx64" X28: %016"PRIx64" FP: %016"PRIx64"\n", + regs->x27, regs->x28, regs->fp); printk("\n"); if ( guest_mode ) { - printk("SCTLR_EL1: %08"PRIx32"\n", ctxt->sctlr); - printk(" TCR_EL1: %08"PRIx32"\n", ctxt->tcr); - printk("TTBR0_EL1: %010"PRIx64"\n", ctxt->ttbr0); - printk("TTBR1_EL1: %010"PRIx64"\n", ctxt->ttbr1); - printk(" FAR_EL1: %010"PRIx64"\n", ctxt->far); + printk(" ELR_EL1: %016"PRIx64"\n", regs->elr_el1); + printk(" ESR_EL1: %08"PRIx32"\n", ctxt->esr_el1); + printk(" FAR_EL1: %016"PRIx64"\n", ctxt->far); + printk("\n"); + printk(" SCTLR_EL1: %08"PRIx32"\n", ctxt->sctlr_el1); + printk(" TCR_EL1: %08"PRIx32"\n", ctxt->tcr_el1); + printk(" TTBR0_EL1: %016"PRIx64"\n", ctxt->ttbr0_el1); + printk(" TTBR1_EL1: %016"PRIx64"\n", ctxt->ttbr1_el1); printk("\n"); } } @@ -413,60 
+458,68 @@ static void _show_registers(struct cpu_user_regs *regs, show_registers_32(regs, ctxt, guest_mode, v); #endif } - -#ifdef CONFIG_ARM_32 - printk("HTTBR %"PRIx64"\n", READ_CP64(HTTBR)); - printk("HDFAR %"PRIx32"\n", READ_CP32(HDFAR)); - printk("HIFAR %"PRIx32"\n", READ_CP32(HIFAR)); - printk("HPFAR %"PRIx32"\n", READ_CP32(HPFAR)); - printk("HCR %08"PRIx32"\n", READ_CP32(HCR)); - printk("HSR %"PRIx32"\n", READ_CP32(HSR)); - printk("VTTBR %010"PRIx64"\n", READ_CP64(VTTBR)); + printk(" VTCR_EL2: %08"PRIx32"\n", READ_SYSREG32(VTCR_EL2)); + printk(" VTTBR_EL2: %016"PRIx64"\n", ctxt->vttbr_el2); printk("\n"); - printk("DFSR %"PRIx32" DFAR %"PRIx32"\n", READ_CP32(DFSR), READ_CP32(DFAR)); - printk("IFSR %"PRIx32" IFAR %"PRIx32"\n", READ_CP32(IFSR), READ_CP32(IFAR)); + printk(" SCTLR_EL2: %08"PRIx32"\n", READ_SYSREG32(SCTLR_EL2)); + printk(" HCR_EL2: %016"PRIregister"\n", READ_SYSREG(HCR_EL2)); + printk(" TTBR0_EL2: %016"PRIx64"\n", READ_SYSREG64(TTBR0_EL2)); printk("\n"); + printk(" ESR_EL2: %08"PRIx32"\n", READ_SYSREG32(ESR_EL2)); + printk(" HPFAR_EL2: %016"PRIregister"\n", READ_SYSREG(HPFAR_EL2)); + +#ifdef CONFIG_ARM_32 + printk(" HDFAR: %08"PRIx32"\n", READ_CP32(HDFAR)); + printk(" HIFAR: %08"PRIx32"\n", READ_CP32(HIFAR)); #else - printk("TTBR0_EL2: %"PRIx64"\n", READ_SYSREG64(TTBR0_EL2)); - printk(" FAR_EL2: %"PRIx64"\n", READ_SYSREG64(FAR_EL2)); - printk("HPFAR_EL2: %"PRIx64"\n", READ_SYSREG64(HPFAR_EL2)); - printk(" HCR_EL2: %"PRIx64"\n", READ_SYSREG64(HCR_EL2)); - printk(" ESR_EL2: %"PRIx64"\n", READ_SYSREG64(ESR_EL2)); - printk("VTTBR_EL2: %"PRIx64"\n", READ_SYSREG64(VTTBR_EL2)); - printk("\n"); + printk(" FAR_EL2: %016"PRIx64"\n", READ_SYSREG64(FAR_EL2)); #endif + printk("\n"); } void show_registers(struct cpu_user_regs *regs) { struct reg_ctxt ctxt; - ctxt.sctlr = READ_SYSREG(SCTLR_EL1); - ctxt.tcr = READ_SYSREG(TCR_EL1); - ctxt.ttbr0 = READ_SYSREG64(TTBR0_EL1); - ctxt.ttbr1 = READ_SYSREG64(TTBR1_EL1); + ctxt.sctlr_el1 = READ_SYSREG(SCTLR_EL1); + 
ctxt.tcr_el1 = READ_SYSREG(TCR_EL1); + ctxt.ttbr0_el1 = READ_SYSREG64(TTBR0_EL1); + ctxt.ttbr1_el1 = READ_SYSREG64(TTBR1_EL1); #ifdef CONFIG_ARM_32 ctxt.dfar = READ_CP32(DFAR); ctxt.ifar = READ_CP32(IFAR); + ctxt.dfsr = READ_CP32(DFSR); + ctxt.ifsr = READ_CP32(IFSR); #else ctxt.far = READ_SYSREG(FAR_EL1); + ctxt.esr_el1 = READ_SYSREG(ESR_EL1); + ctxt.ifsr32_el2 = READ_SYSREG(IFSR32_EL2); #endif + ctxt.vttbr_el2 = READ_SYSREG64(VTTBR_EL2); + _show_registers(regs, &ctxt, guest_mode(regs), current); } void vcpu_show_registers(const struct vcpu *v) { struct reg_ctxt ctxt; - ctxt.sctlr = v->arch.sctlr; - ctxt.tcr = v->arch.ttbcr; - ctxt.ttbr0 = v->arch.ttbr0; - ctxt.ttbr1 = v->arch.ttbr1; + ctxt.sctlr_el1 = v->arch.sctlr; + ctxt.tcr_el1 = v->arch.ttbcr; + ctxt.ttbr0_el1 = v->arch.ttbr0; + ctxt.ttbr1_el1 = v->arch.ttbr1; #ifdef CONFIG_ARM_32 ctxt.dfar = v->arch.dfar; ctxt.ifar = v->arch.ifar; + ctxt.dfsr = v->arch.dfsr; + ctxt.ifsr = v->arch.ifsr; #else ctxt.far = v->arch.far; + ctxt.esr_el1 = v->arch.esr; + ctxt.ifsr32_el2 = v->arch.ifsr; #endif + + ctxt.vttbr_el2 = v->domain->arch.vttbr; + _show_registers(&v->arch.cpu_info->guest_cpu_user_regs, &ctxt, 1, v); } @@ -834,7 +887,7 @@ void dump_guest_s1_walk(struct domain *d, vaddr_t addr) printk("dom%d VA 0x%08"PRIvaddr"\n", d->domain_id, addr); printk(" TTBCR: 0x%08"PRIx32"\n", ttbcr); - printk(" TTBR0: 0x%010"PRIx64" = 0x%"PRIpaddr"\n", + printk(" TTBR0: 0x%016"PRIx64" = 0x%"PRIpaddr"\n", ttbr0, p2m_lookup(d, ttbr0 & PAGE_MASK)); if ( ttbcr & TTBCR_EAE ) diff --git a/xen/include/asm-arm/cpregs.h b/xen/include/asm-arm/cpregs.h index f08d59a..71daafa 100644 --- a/xen/include/asm-arm/cpregs.h +++ b/xen/include/asm-arm/cpregs.h @@ -250,6 +250,7 @@ #define DACR32_EL2 DACR #define ESR_EL2 HSR #define HCR_EL2 HCR +#define HPFAR_EL2 HPFAR #define ID_AFR0_EL1 ID_AFR0 #define ID_DFR0_EL1 ID_DFR0 #define ID_ISAR0_EL1 ID_ISAR0 -- 1.7.2.5
Ian Campbell
2013-Apr-17 13:16 UTC
[PATCH 07/13] xen: arm: support dumping 64-bit guest stack
Signed-off-by: Ian Campbell <ian.campbell@citrix.com> Acked-by: Stefano Stabellini <stefano.stabellini@eu.citrix.com> --- xen/arch/arm/traps.c | 80 +++++++++++++++++++++++++++++++++++++++++++++++-- 1 files changed, 76 insertions(+), 4 deletions(-) diff --git a/xen/arch/arm/traps.c b/xen/arch/arm/traps.c index 2f4e157..2671eba 100644 --- a/xen/arch/arm/traps.c +++ b/xen/arch/arm/traps.c @@ -523,9 +523,81 @@ void vcpu_show_registers(const struct vcpu *v) _show_registers(&v->arch.cpu_info->guest_cpu_user_regs, &ctxt, 1, v); } -static void show_guest_stack(struct cpu_user_regs *regs) +static void show_guest_stack(struct vcpu *v, struct cpu_user_regs *regs) { - printk("GUEST STACK GOES HERE\n"); + int i; + vaddr_t sp; + paddr_t stack_phys; + void *mapped; + unsigned long *stack, addr; + + switch ( regs->cpsr & PSR_MODE_MASK ) + { + case PSR_MODE_USR: + case PSR_MODE_SYS: +#ifdef CONFIG_ARM_64 + case PSR_MODE_EL0t: +#endif + printk("No stack trace for guest user-mode\n"); + return; + + case PSR_MODE_FIQ: + case PSR_MODE_IRQ: + case PSR_MODE_SVC: + case PSR_MODE_ABT: + case PSR_MODE_UND: + printk("No stack trace for 32-bit guest kernel-mode\n"); + return; + +#ifdef CONFIG_ARM_64 + case PSR_MODE_EL1t: + sp = regs->sp_el0; + break; + case PSR_MODE_EL1h: + sp = regs->sp_el1; + break; +#endif + + case PSR_MODE_HYP: + case PSR_MODE_MON: +#ifdef CONFIG_ARM_64 + case PSR_MODE_EL3h: + case PSR_MODE_EL3t: + case PSR_MODE_EL2h: + case PSR_MODE_EL2t: +#endif + default: + BUG(); + return; + } + + printk("Guest stack trace from sp=%"PRIvaddr":\n ", sp); + + if ( gvirt_to_maddr(sp, &stack_phys) ) + { + printk("Failed to convert stack to physical address\n"); + return; + } + + mapped = map_domain_page(stack_phys >> PAGE_SHIFT); + + stack = mapped + (sp & ~PAGE_MASK); + + for ( i = 0; i < (debug_stack_lines*stack_words_per_line); i++ ) + { + if ( (((long)stack - 1) ^ ((long)(stack + 1) - 1)) & PAGE_SIZE ) + break; + addr = *stack; + if ( (i != 0) && ((i % stack_words_per_line) == 0) ) + 
printk("\n "); + printk(" %p", _p(addr)); + stack++; + } + if ( i == 0 ) + printk("Stack empty."); + printk("\n"); + unmap_domain_page(mapped); + } #define STACK_BEFORE_EXCEPTION(regs) ((register_t*)(regs)->sp) @@ -589,7 +661,7 @@ void show_stack(struct cpu_user_regs *regs) int i; if ( guest_mode(regs) ) - return show_guest_stack(regs); + return show_guest_stack(current, regs); printk("Xen stack trace from sp=%p:\n ", stack); @@ -631,7 +703,7 @@ void vcpu_show_execution_state(struct vcpu *v) vcpu_show_registers(v); if ( !usr_mode(&v->arch.cpu_info->guest_cpu_user_regs) ) - show_guest_stack(&v->arch.cpu_info->guest_cpu_user_regs); + show_guest_stack(v, &v->arch.cpu_info->guest_cpu_user_regs); vcpu_unpause(v); } -- 1.7.2.5
Ian Campbell
2013-Apr-17 13:16 UTC
[PATCH 08/13] xen: arm: show less words in a line of a stack trace in 64-bit builds
Words are twice as wide so this ensures that a line is still <80 characters. Signed-off-by: Ian Campbell <ian.campbell@citrix.com> Acked-by: Stefano Stabellini <stefano.stabellini@eu.citrix.com> --- xen/arch/arm/traps.c | 9 +++++++-- 1 files changed, 7 insertions(+), 2 deletions(-) diff --git a/xen/arch/arm/traps.c b/xen/arch/arm/traps.c index 2671eba..f1dd557 100644 --- a/xen/arch/arm/traps.c +++ b/xen/arch/arm/traps.c @@ -55,10 +55,15 @@ static inline void check_stack_alignment_constraints(void) { #endif } +#ifdef CONFIG_ARM_32 static int debug_stack_lines = 20; -integer_param("debug_stack_lines", debug_stack_lines); - #define stack_words_per_line 8 +#else +static int debug_stack_lines = 40; +#define stack_words_per_line 4 +#endif + +integer_param("debug_stack_lines", debug_stack_lines); void __cpuinit init_traps(void) -- 1.7.2.5
Ian Campbell
2013-Apr-17 13:16 UTC
[PATCH 09/13] xen: arm: Set EL1 register width in HCR_EL2 during context switch.
Signed-off-by: Ian Campbell <ian.campbell@citrix.com> --- xen/arch/arm/domain.c | 7 +++++++ xen/include/asm-arm/processor.h | 3 +++ 2 files changed, 10 insertions(+), 0 deletions(-) diff --git a/xen/arch/arm/domain.c b/xen/arch/arm/domain.c index 08bb132..20bb6c7 100644 --- a/xen/arch/arm/domain.c +++ b/xen/arch/arm/domain.c @@ -203,6 +203,13 @@ static void ctxt_switch_to(struct vcpu *n) isb(); +#ifdef CONFIG_ARM_64 + if ( is_pv32_domain(n->domain) ) + hcr &= ~HCR_RW; + else + hcr |= HCR_RW; +#endif + WRITE_SYSREG(hcr, HCR_EL2); isb(); diff --git a/xen/include/asm-arm/processor.h b/xen/include/asm-arm/processor.h index 1681ebf..32c441b 100644 --- a/xen/include/asm-arm/processor.h +++ b/xen/include/asm-arm/processor.h @@ -41,6 +41,9 @@ #define HSCTLR_BASE 0x30c51878 /* HCR Hyp Configuration Register */ +#ifdef CONFIG_ARM_64 +#define HCR_RW (1<<31) +#endif #define HCR_TGE (1<<27) #define HCR_TVM (1<<26) #define HCR_TTLB (1<<25) -- 1.7.2.5
Ian Campbell
2013-Apr-17 13:16 UTC
[PATCH 10/13] xen: arm: handle traps from 64-bit guests
Signed-off-by: Ian Campbell <ian.campbell@citrix.com> Acked-by: Stefano Stabellini <stefano.stabellini@eu.citrix.com> --- xen/arch/arm/arm64/entry.S | 9 +++++++-- 1 files changed, 7 insertions(+), 2 deletions(-) diff --git a/xen/arch/arm/arm64/entry.S b/xen/arch/arm/arm64/entry.S index b5af1e2..a70244b 100644 --- a/xen/arch/arm/arm64/entry.S +++ b/xen/arch/arm/arm64/entry.S @@ -143,11 +143,16 @@ hyp_irq: guest_sync: entry hyp=0, compat=0 - invalid BAD_SYNC /* No AArch64 guest support yet */ + msr daifclr, #2 + mov x0, sp + bl do_trap_hypervisor + b return_to_guest guest_irq: entry hyp=0, compat=0 - invalid BAD_IRQ /* No AArch64 guest support yet */ + mov x0, sp + bl do_trap_irq + b return_to_guest guest_fiq_invalid: entry hyp=0, compat=0 -- 1.7.2.5
Ian Campbell
2013-Apr-17 13:16 UTC
[PATCH 11/13] xen: arm: handle hypercalls from 64-bit guests
Signed-off-by: Ian Campbell <ian.campbell@citrix.com> Acked-by: Stefano Stabellini <stefano.stabellini@eu.citrix.com> --- xen/arch/arm/traps.c | 67 ++++++++++++++++++++++++++++---------- xen/include/asm-arm/processor.h | 7 +++- 2 files changed, 54 insertions(+), 20 deletions(-) diff --git a/xen/arch/arm/traps.c b/xen/arch/arm/traps.c index f1dd557..190d1e8 100644 --- a/xen/arch/arm/traps.c +++ b/xen/arch/arm/traps.c @@ -726,8 +726,8 @@ unsigned long do_arch_0(unsigned int cmd, unsigned long long value) return 0; } -typedef unsigned long (*arm_hypercall_fn_t)( - unsigned int, unsigned int, unsigned int, unsigned int, unsigned int); +typedef register_t (*arm_hypercall_fn_t)( + register_t, register_t, register_t, register_t, register_t); typedef struct { arm_hypercall_fn_t fn; @@ -783,30 +783,49 @@ static void do_debug_trap(struct cpu_user_regs *regs, unsigned int code) } } -static void do_trap_hypercall(struct cpu_user_regs *regs, unsigned long iss) +#ifdef CONFIG_ARM_64 +#define HYPERCALL_RESULT_REG(r) (r)->x0 +#define HYPERCALL_ARG1(r) (r)->x0 +#define HYPERCALL_ARG2(r) (r)->x1 +#define HYPERCALL_ARG3(r) (r)->x2 +#define HYPERCALL_ARG4(r) (r)->x3 +#define HYPERCALL_ARG5(r) (r)->x4 +#define HYPERCALL_ARGS(r) (r)->x0, (r)->x1, (r)->x2, (r)->x3, (r)->x4 +#else +#define HYPERCALL_RESULT_REG(r) (r)->r0 +#define HYPERCALL_ARG1(r) (r)->r0 +#define HYPERCALL_ARG2(r) (r)->r1 +#define HYPERCALL_ARG3(r) (r)->r2 +#define HYPERCALL_ARG4(r) (r)->r3 +#define HYPERCALL_ARG5(r) (r)->r4 +#define HYPERCALL_ARGS(r) (r)->r0, (r)->r1, (r)->r2, (r)->r3, (r)->r4 +#endif + +static void do_trap_hypercall(struct cpu_user_regs *regs, register_t *nr, + unsigned long iss) { arm_hypercall_fn_t call = NULL; #ifndef NDEBUG - uint32_t orig_pc = regs->pc; + register_t orig_pc = regs->pc; #endif if ( iss != XEN_HYPERCALL_TAG ) domain_crash_synchronous(); - if ( regs->r12 >= ARRAY_SIZE(arm_hypercall_table) ) + if ( *nr >= ARRAY_SIZE(arm_hypercall_table) ) { - regs->r0 = -ENOSYS; + 
HYPERCALL_RESULT_REG(regs) = -ENOSYS; return; } - call = arm_hypercall_table[regs->r12].fn; + call = arm_hypercall_table[*nr].fn; if ( call == NULL ) { - regs->r0 = -ENOSYS; + HYPERCALL_RESULT_REG(regs) = -ENOSYS; return; } - regs->r0 = call(regs->r0, regs->r1, regs->r2, regs->r3, regs->r4); + HYPERCALL_RESULT_REG(regs) = call(HYPERCALL_ARGS(regs)); #ifndef NDEBUG /* @@ -815,16 +834,16 @@ static void do_trap_hypercall(struct cpu_user_regs *regs, unsigned long iss) */ if ( orig_pc == regs->pc ) { - switch ( arm_hypercall_table[regs->r12].nr_args ) { - case 5: regs->r4 = 0xDEADBEEF; - case 4: regs->r3 = 0xDEADBEEF; - case 3: regs->r2 = 0xDEADBEEF; - case 2: regs->r1 = 0xDEADBEEF; - case 1: /* Don''t clobber r0 -- it''s the return value */ + switch ( arm_hypercall_table[*nr].nr_args ) { + case 5: HYPERCALL_ARG5(regs) = 0xDEADBEEF; + case 4: HYPERCALL_ARG4(regs) = 0xDEADBEEF; + case 3: HYPERCALL_ARG3(regs) = 0xDEADBEEF; + case 2: HYPERCALL_ARG2(regs) = 0xDEADBEEF; + case 1: /* Don''t clobber x0/r0 -- it''s the return value */ break; default: BUG(); } - regs->r12 = 0xDEADBEEF; + *nr = 0xDEADBEEF; } #endif } @@ -1079,11 +1098,23 @@ asmlinkage void do_trap_hypervisor(struct cpu_user_regs *regs) goto bad_trap; do_cp15_64(regs, hsr); break; - case HSR_EC_HVC: + case HSR_EC_HVC32: +#ifndef NDEBUG + if ( (hsr.iss & 0xff00) == 0xff00 ) + return do_debug_trap(regs, hsr.iss & 0x00ff); +#endif + do_trap_hypercall(regs, (register_t *)®s->r12, hsr.iss); + break; + +#ifdef CONFIG_ARM_64 + case HSR_EC_HVC64: +#ifndef NDEBUG if ( (hsr.iss & 0xff00) == 0xff00 ) return do_debug_trap(regs, hsr.iss & 0x00ff); - do_trap_hypercall(regs, hsr.iss); +#endif + do_trap_hypercall(regs, ®s->x16, hsr.iss); break; +#endif case HSR_EC_DATA_ABORT_GUEST: do_trap_data_abort_guest(regs, hsr.dabt); break; diff --git a/xen/include/asm-arm/processor.h b/xen/include/asm-arm/processor.h index 32c441b..17f5465 100644 --- a/xen/include/asm-arm/processor.h +++ b/xen/include/asm-arm/processor.h @@ -86,8 +86,11 @@ 
#define HSR_EC_JAZELLE 0x09 #define HSR_EC_BXJ 0x0a #define HSR_EC_CP14_64 0x0c -#define HSR_EC_SVC 0x11 -#define HSR_EC_HVC 0x12 +#define HSR_EC_SVC32 0x11 +#define HSR_EC_HVC32 0x12 +#ifdef CONFIG_ARM_64 +#define HSR_EC_HVC64 0x16 +#endif #define HSR_EC_INSTR_ABORT_GUEST 0x20 #define HSR_EC_INSTR_ABORT_HYP 0x21 #define HSR_EC_DATA_ABORT_GUEST 0x24 -- 1.7.2.5
Ian Campbell
2013-Apr-17 13:16 UTC
[PATCH 12/13] xen: arm: handle 64-bit system register access traps.
Wire up the vtimer handling to it. Use a simplified version of the 32-bit cp-register macros to have convenient decoding of HSR register values. (simplified because we don''t need them for passing to the assembler on 64-bit) Signed-off-by: Ian Campbell <ian.campbell@citrix.com> Acked-by: Stefano Stabellini <stefano.stabellini@eu.citrix.com> --- xen/arch/arm/traps.c | 39 ++++++++++++ xen/arch/arm/vtimer.c | 127 ++++++++++++++++++++++++++------------- xen/include/asm-arm/processor.h | 32 ++++++++++ xen/include/asm-arm/sysregs.h | 56 +++++++++++++++++ 4 files changed, 212 insertions(+), 42 deletions(-) create mode 100644 xen/include/asm-arm/sysregs.h diff --git a/xen/arch/arm/traps.c b/xen/arch/arm/traps.c index 190d1e8..67d9072 100644 --- a/xen/arch/arm/traps.c +++ b/xen/arch/arm/traps.c @@ -973,6 +973,39 @@ static void do_cp15_64(struct cpu_user_regs *regs, } +#ifdef CONFIG_ARM_64 +static void do_sysreg(struct cpu_user_regs *regs, + union hsr hsr) +{ + struct hsr_sysreg sysreg = hsr.sysreg; + + switch ( hsr.bits & HSR_SYSREG_REGS_MASK ) + { + case CNTP_CTL_EL0: + case CNTP_TVAL_EL0: + if ( !vtimer_emulate(regs, hsr) ) + { + dprintk(XENLOG_ERR, + "failed emulation of 64-bit vtimer sysreg access\n"); + domain_crash_synchronous(); + } + break; + default: + printk("%s %d, %d, c%d, c%d, %d %s x%d @ 0x%"PRIregister"\n", + sysreg.read ? "mrs" : "msr", + sysreg.op0, sysreg.op1, + sysreg.crn, sysreg.crm, + sysreg.op2, + sysreg.read ? 
"=>" : "<=", + sysreg.reg, regs->pc); + panic("unhandled 64-bit sysreg access %#x\n", + hsr.bits & HSR_SYSREG_REGS_MASK); + } + + regs->pc += 4; +} +#endif + void dump_guest_s1_walk(struct domain *d, vaddr_t addr) { uint32_t ttbcr = READ_SYSREG32(TCR_EL1); @@ -1114,7 +1147,13 @@ asmlinkage void do_trap_hypervisor(struct cpu_user_regs *regs) #endif do_trap_hypercall(regs, ®s->x16, hsr.iss); break; + case HSR_EC_SYSREG: + if ( is_pv32_domain(current->domain) ) + goto bad_trap; + do_sysreg(regs, hsr); + break; #endif + case HSR_EC_DATA_ABORT_GUEST: do_trap_data_abort_guest(regs, hsr.dabt); break; diff --git a/xen/arch/arm/vtimer.c b/xen/arch/arm/vtimer.c index 1cb365e..375d8ba 100644 --- a/xen/arch/arm/vtimer.c +++ b/xen/arch/arm/vtimer.c @@ -102,55 +102,66 @@ int virt_timer_restore(struct vcpu *v) return 0; } -static int vtimer_emulate_32(struct cpu_user_regs *regs, union hsr hsr) +static void vtimer_cntp_ctl(struct cpu_user_regs *regs, uint32_t *r, int read) { struct vcpu *v = current; - struct hsr_cp32 cp32 = hsr.cp32; - uint32_t *r = (uint32_t *)select_user_reg(regs, cp32.reg); - s_time_t now; - - switch ( hsr.bits & HSR_CP32_REGS_MASK ) + if ( read ) { - case HSR_CPREG32(CNTP_CTL): - if ( cp32.read ) + *r = v->arch.phys_timer.ctl; + } + else + { + uint32_t ctl = *r & ~CNTx_CTL_PENDING; + if ( ctl & CNTx_CTL_ENABLE ) + ctl |= v->arch.phys_timer.ctl & CNTx_CTL_PENDING; + v->arch.phys_timer.ctl = ctl; + + if ( v->arch.phys_timer.ctl & CNTx_CTL_ENABLE ) { - *r = v->arch.phys_timer.ctl; + set_timer(&v->arch.phys_timer.timer, + v->arch.phys_timer.cval + v->arch.phys_timer.offset); } else + stop_timer(&v->arch.phys_timer.timer); + } +} + +static void vtimer_cntp_tval(struct cpu_user_regs *regs, uint32_t *r, int read) +{ + struct vcpu *v = current; + s_time_t now; + + now = NOW() - v->arch.phys_timer.offset; + + if ( read ) + { + *r = (uint32_t)(ns_to_ticks(v->arch.phys_timer.cval - now) & 0xffffffffull); + } + else + { + v->arch.phys_timer.cval = now + ticks_to_ns(*r); 
+ if ( v->arch.phys_timer.ctl & CNTx_CTL_ENABLE ) { - uint32_t ctl = *r & ~CNTx_CTL_PENDING; - if ( ctl & CNTx_CTL_ENABLE ) - ctl |= v->arch.phys_timer.ctl & CNTx_CTL_PENDING; - v->arch.phys_timer.ctl = ctl; - - if ( v->arch.phys_timer.ctl & CNTx_CTL_ENABLE ) - { - set_timer(&v->arch.phys_timer.timer, - v->arch.phys_timer.cval + v->arch.phys_timer.offset); - } - else - stop_timer(&v->arch.phys_timer.timer); + v->arch.phys_timer.ctl &= ~CNTx_CTL_PENDING; + set_timer(&v->arch.phys_timer.timer, + v->arch.phys_timer.cval + v->arch.phys_timer.offset); } + } +} +static int vtimer_emulate_cp32(struct cpu_user_regs *regs, union hsr hsr) +{ + struct hsr_cp32 cp32 = hsr.cp32; + uint32_t *r = (uint32_t *)select_user_reg(regs, cp32.reg); + + switch ( hsr.bits & HSR_CP32_REGS_MASK ) + { + case HSR_CPREG32(CNTP_CTL): + vtimer_cntp_ctl(regs, r, cp32.read); return 1; case HSR_CPREG32(CNTP_TVAL): - now = NOW() - v->arch.phys_timer.offset; - if ( cp32.read ) - { - *r = (uint32_t)(ns_to_ticks(v->arch.phys_timer.cval - now) & 0xffffffffull); - } - else - { - v->arch.phys_timer.cval = now + ticks_to_ns(*r); - if ( v->arch.phys_timer.ctl & CNTx_CTL_ENABLE ) - { - v->arch.phys_timer.ctl &= ~CNTx_CTL_PENDING; - set_timer(&v->arch.phys_timer.timer, - v->arch.phys_timer.cval + v->arch.phys_timer.offset); - } - } - + vtimer_cntp_tval(regs, r, cp32.read); return 1; default: @@ -158,7 +169,7 @@ static int vtimer_emulate_32(struct cpu_user_regs *regs, union hsr hsr) } } -static int vtimer_emulate_64(struct cpu_user_regs *regs, union hsr hsr) +static int vtimer_emulate_cp64(struct cpu_user_regs *regs, union hsr hsr) { struct vcpu *v = current; struct hsr_cp64 cp64 = hsr.cp64; @@ -189,16 +200,48 @@ static int vtimer_emulate_64(struct cpu_user_regs *regs, union hsr hsr) } } +#ifdef CONFIG_ARM_64 +static int vtimer_emulate_sysreg(struct cpu_user_regs *regs, union hsr hsr) +{ + struct hsr_sysreg sysreg = hsr.sysreg; + register_t *x = select_user_reg(regs, sysreg.reg); + uint32_t r = (uint32_t)*x; + 
+ switch ( hsr.bits & HSR_SYSREG_REGS_MASK ) + { + case CNTP_CTL_EL0: + vtimer_cntp_ctl(regs, &r, sysreg.read); + *x = r; + return 1; + case CNTP_TVAL_EL0: + vtimer_cntp_tval(regs, &r, sysreg.read); + *x = r; + return 1; + default: + return 0; + } + +} +#endif + int vtimer_emulate(struct cpu_user_regs *regs, union hsr hsr) { - if ( !is_pv32_domain(current->domain) ) - return -EINVAL; switch (hsr.ec) { case HSR_EC_CP15_32: - return vtimer_emulate_32(regs, hsr); + if ( !is_pv32_domain(current->domain) ) + return 0; + return vtimer_emulate_cp32(regs, hsr); case HSR_EC_CP15_64: - return vtimer_emulate_64(regs, hsr); + if ( !is_pv32_domain(current->domain) ) + return 0; + return vtimer_emulate_cp64(regs, hsr); +#ifdef CONFIG_ARM_64 + case HSR_EC_SYSREG: + if ( is_pv32_domain(current->domain) ) + return 0; + return vtimer_emulate_sysreg(regs, hsr); +#endif default: return 0; } diff --git a/xen/include/asm-arm/processor.h b/xen/include/asm-arm/processor.h index 17f5465..d75530d 100644 --- a/xen/include/asm-arm/processor.h +++ b/xen/include/asm-arm/processor.h @@ -2,6 +2,7 @@ #define __ASM_ARM_PROCESSOR_H #include <asm/cpregs.h> +#include <asm/sysregs.h> /* MIDR Main ID Register */ #define MIDR_MASK 0xff0ffff0 @@ -90,6 +91,7 @@ #define HSR_EC_HVC32 0x12 #ifdef CONFIG_ARM_64 #define HSR_EC_HVC64 0x16 +#define HSR_EC_SYSREG 0x18 #endif #define HSR_EC_INSTR_ABORT_GUEST 0x20 #define HSR_EC_INSTR_ABORT_HYP 0x21 @@ -249,6 +251,21 @@ union hsr { unsigned long ec:6; /* Exception Class */ } cp64; /* HSR_EC_CP15_64, HSR_EC_CP14_64 */ +#ifdef CONFIG_ARM_64 + struct hsr_sysreg { + unsigned long read:1; /* Direction */ + unsigned long crm:4; /* CRm */ + unsigned long reg:5; /* Rt */ + unsigned long crn:4; /* CRn */ + unsigned long op1:3; /* Op1 */ + unsigned long op2:3; /* Op2 */ + unsigned long op0:2; /* Op0 */ + unsigned long res0:3; + unsigned long len:1; /* Instruction length */ + unsigned long ec:6; + } sysreg; /* HSR_EC_SYSREG */ +#endif + struct hsr_dabt { unsigned long dfsc:6; 
/* Data Fault Status Code */ unsigned long write:1; /* Write / not Read */ @@ -291,6 +308,21 @@ union hsr { #define HSR_CP64_CRM_SHIFT (1) #define HSR_CP64_REGS_MASK (HSR_CP64_OP1_MASK|HSR_CP64_CRM_MASK) +/* HSR.EC == HSR_SYSREG */ +#define HSR_SYSREG_OP0_MASK (0x00300000) +#define HSR_SYSREG_OP0_SHIFT (20) +#define HSR_SYSREG_OP1_MASK (0x0001c000) +#define HSR_SYSREG_OP1_SHIFT (14) +#define HSR_SYSREG_CRN_MASK (0x00003800) +#define HSR_SYSREG_CRN_SHIFT (10) +#define HSR_SYSREG_CRM_MASK (0x0000001e) +#define HSR_SYSREG_CRM_SHIFT (1) +#define HSR_SYSREG_OP2_MASK (0x000e0000) +#define HSR_SYSREG_OP2_SHIFT (17) +#define HSR_SYSREG_REGS_MASK (HSR_SYSREG_OP0_MASK|HSR_SYSREG_OP1_MASK|\ + HSR_SYSREG_CRN_MASK|HSR_SYSREG_CRM_MASK|\ + HSR_SYSREG_OP2_MASK) + /* Physical Address Register */ #define PAR_F (1<<0) diff --git a/xen/include/asm-arm/sysregs.h b/xen/include/asm-arm/sysregs.h new file mode 100644 index 0000000..9c64777 --- /dev/null +++ b/xen/include/asm-arm/sysregs.h @@ -0,0 +1,56 @@ +#ifndef __ASM_ARM_SYSREGS_H +#define __ASM_ARM_SYSREGS_H + +#ifdef CONFIG_ARM_64 + +#include <xen/stringify.h> + +/* AArch 64 System Register Encodings */ +#define __HSR_SYSREG_c0 0 +#define __HSR_SYSREG_c1 1 +#define __HSR_SYSREG_c2 2 +#define __HSR_SYSREG_c3 3 +#define __HSR_SYSREG_c4 4 +#define __HSR_SYSREG_c5 5 +#define __HSR_SYSREG_c6 6 +#define __HSR_SYSREG_c7 7 +#define __HSR_SYSREG_c8 8 +#define __HSR_SYSREG_c9 9 +#define __HSR_SYSREG_c10 10 +#define __HSR_SYSREG_c11 11 +#define __HSR_SYSREG_c12 12 +#define __HSR_SYSREG_c13 13 +#define __HSR_SYSREG_c14 14 +#define __HSR_SYSREG_c15 15 + +#define __HSR_SYSREG_0 0 +#define __HSR_SYSREG_1 1 +#define __HSR_SYSREG_2 2 +#define __HSR_SYSREG_3 3 +#define __HSR_SYSREG_4 4 +#define __HSR_SYSREG_5 5 +#define __HSR_SYSREG_6 6 +#define __HSR_SYSREG_7 7 + +/* These are used to decode traps with HSR.EC==HSR_EC_SYSREG */ +#define HSR_SYSREG(op0,op1,crn,crm,op2) \ + ((__HSR_SYSREG_##op0) << HSR_SYSREG_OP0_SHIFT) | \ + ((__HSR_SYSREG_##op1) << 
HSR_SYSREG_OP1_SHIFT) | \ + ((__HSR_SYSREG_##crn) << HSR_SYSREG_CRN_SHIFT) | \ + ((__HSR_SYSREG_##crm) << HSR_SYSREG_CRM_SHIFT) | \ + ((__HSR_SYSREG_##op2) << HSR_SYSREG_OP2_SHIFT) + +#define CNTP_CTL_EL0 HSR_SYSREG(3,3,c14,c2,1) +#define CNTP_TVAL_EL0 HSR_SYSREG(3,3,c14,c2,0) +#endif + +#endif + +/* + * Local variables: + * mode: C + * c-set-style: "BSD" + * c-basic-offset: 4 + * indent-tabs-mode: nil + * End: + */ -- 1.7.2.5
Signed-off-by: Ian Campbell <ian.campbell@citrix.com> Acked-by: Stefano Stabellini <stefano.stabellini@eu.citrix.com> --- xen/include/asm-arm/processor.h | 8 ++++---- 1 files changed, 4 insertions(+), 4 deletions(-) diff --git a/xen/include/asm-arm/processor.h b/xen/include/asm-arm/processor.h index d75530d..53ffce2 100644 --- a/xen/include/asm-arm/processor.h +++ b/xen/include/asm-arm/processor.h @@ -244,11 +244,11 @@ union hsr { unsigned long reg1:5; /* Rt1 */ unsigned long reg2:5; /* Rt2 */ unsigned long sbzp2:1; - unsigned long op1:4; /* Op1 */ - unsigned long cc:4; /* Condition Code */ + unsigned long op1:4; /* Op1 */ + unsigned long cc:4; /* Condition Code */ unsigned long ccvalid:1;/* CC Valid */ - unsigned long len:1; /* Instruction length */ - unsigned long ec:6; /* Exception Class */ + unsigned long len:1; /* Instruction length */ + unsigned long ec:6; /* Exception Class */ } cp64; /* HSR_EC_CP15_64, HSR_EC_CP14_64 */ #ifdef CONFIG_ARM_64 -- 1.7.2.5
George Dunlap
2013-Apr-17 13:28 UTC
Re: [PATCH 00/13 V2] xen: arm: 64-bit dom0 kernel support
On Wed, Apr 17, 2013 at 2:16 PM, Ian Campbell <Ian.Campbell@citrix.com> wrote:> This series has been rebased (on top of my SMP one, also just reposted) > and acks have been accumulated. I've replied to the feedback but there > wasn't any changes left to make after I had done so. > > From a release perspective this only touches ARM code and I think 64-bit > support (even if only dom0) is worth the risk to 32-bit ARM at this > stage in the freeze.I'm OK with it if Stefano is OK with it. -George
Stefano Stabellini
2013-Apr-18 11:39 UTC
Re: [PATCH 00/13 V2] xen: arm: 64-bit dom0 kernel support
On Wed, 17 Apr 2013, George Dunlap wrote:> On Wed, Apr 17, 2013 at 2:16 PM, Ian Campbell <Ian.Campbell@citrix.com> wrote: > > This series has been rebased (on top of my SMP one, also just reposted) > > and acks have been accumulated. I've replied to the feedback but there > > wasn't any changes left to make after I had done so. > > > > From a release perspective this only touches ARM code and I think 64-bit > > support (even if only dom0) is worth the risk to 32-bit ARM at this > > stage in the freeze. > > I''m OK with it if Stefano is OK with it.I am OK with it
Stefano Stabellini
2013-Apr-25 18:14 UTC
Re: [PATCH 03/13] xen: arm: support for loading 64-bit zImage dom0
On Wed, 17 Apr 2013, Ian Campbell wrote:> This is defined in linux/Documentation/arm64/booting.txt. > > Signed-off-by: Ian Campbell <ian.campbell@citrix.com>Acked-by: Stefano Stabellini <stefano.stabellini@eu.citrix.com>> xen/arch/arm/kernel.c | 80 +++++++++++++++++++++++++++++++++++++++++++++--- > 1 files changed, 75 insertions(+), 5 deletions(-) > > diff --git a/xen/arch/arm/kernel.c b/xen/arch/arm/kernel.c > index e7f34a4..59844b0 100644 > --- a/xen/arch/arm/kernel.c > +++ b/xen/arch/arm/kernel.c > @@ -26,6 +26,8 @@ > > #define ZIMAGE32_MAGIC 0x016f2818 > > +#define ZIMAGE64_MAGIC 0x14000008 > + > struct minimal_dtb_header { > uint32_t magic; > uint32_t total_size; > @@ -99,6 +101,57 @@ static void kernel_zimage_load(struct kernel_info *info) > } > } > > +#ifdef CONFIG_ARM_64 > +/* > + * Check if the image is a 64-bit zImage and setup kernel_info > + */ > +static int kernel_try_zimage64_prepare(struct kernel_info *info, > + paddr_t addr, paddr_t size) > +{ > + /* linux/Documentation/arm64/booting.txt */ > + struct { > + uint32_t magic; > + uint32_t res0; > + uint64_t text_offset; /* Image load offset */ > + uint64_t res1; > + uint64_t res2; > + } zimage; > + uint64_t start, end; > + > + if ( size < sizeof(zimage) ) > + return -EINVAL; > + > + copy_from_paddr(&zimage, addr, sizeof(zimage), DEV_SHARED); > + > + if (zimage.magic != ZIMAGE64_MAGIC) > + return -EINVAL; > + > + /* Currently there is no length in the header, so just use the size */ > + start = 0; > + end = size; > + > + /* > + * Given the above this check is a bit pointless, but leave it > + * here in case someone adds a length field in the future. 
> + */ > + if ( (end - start) > size ) > + return -EINVAL; > + > + info->zimage.kernel_addr = addr; > + > + info->zimage.load_addr = info->mem.bank[0].start > + + zimage.text_offset; > + info->zimage.len = end - start; > + > + info->entry = info->zimage.load_addr; > + info->load = kernel_zimage_load; > + > + info->type = DOMAIN_PV64; > + > + return 0; > +} > +#endif > + > /* > * Check if the image is a 32-bit zImage and setup kernel_info > */ > @@ -153,6 +206,10 @@ static int kernel_try_zimage32_prepare(struct kernel_info *info, > info->entry = info->zimage.load_addr; > info->load = kernel_zimage_load; > > +#ifdef CONFIG_ARM_64 > + info->type = DOMAIN_PV32; > +#endif > + > return 0; > } > > @@ -187,6 +244,19 @@ static int kernel_try_elf_prepare(struct kernel_info *info, > if ( (rc = elf_xen_parse(&info->elf.elf, &info->elf.parms)) != 0 ) > goto err; > > +#ifdef CONFIG_ARM_64 > + if ( elf_32bit(&info->elf.elf) ) > + info->type = DOMAIN_PV32; > + else if ( elf_64bit(&info->elf.elf) ) > + info->type = DOMAIN_PV64; > + else > + { > + printk("Unknown ELF class\n"); > + rc = -EINVAL; > + goto err; > + } > +#endif > + > /* > * TODO: can the ELF header be used to find the physical address > * to load the image to? Instead of assuming virt == phys. > @@ -224,13 +294,13 @@ int kernel_prepare(struct kernel_info *info) > info->load_attr = BUFFERABLE; > } > > - rc = kernel_try_zimage32_prepare(info, start, size); > - if (rc < 0) > - rc = kernel_try_elf_prepare(info, start, size); > - > #ifdef CONFIG_ARM_64 > - info->type = DOMAIN_PV32; /* No 64-bit guest support yet */ > + rc = kernel_try_zimage64_prepare(info, start, size); > + if (rc < 0) > #endif > + rc = kernel_try_zimage32_prepare(info, start, size); > + if (rc < 0) > + rc = kernel_try_elf_prepare(info, start, size); > > return rc; > } > -- > 1.7.2.5 >
Stefano Stabellini
2013-Apr-25 18:14 UTC
Re: [PATCH 02/13] xen: arm: rename 32-bit specific zImage field offset constants
On Wed, 17 Apr 2013, Ian Campbell wrote:> This will help avoid confusion when 64-bit Image support is added. > > Signed-off-by: Ian Campbell <ian.campbell@citrix.com>Acked-by: Stefano Stabellini <stefano.stabellini@eu.citrix.com>> xen/arch/arm/kernel.c | 28 ++++++++++++++-------------- > 1 files changed, 14 insertions(+), 14 deletions(-) > > diff --git a/xen/arch/arm/kernel.c b/xen/arch/arm/kernel.c > index 8f4a60d..e7f34a4 100644 > --- a/xen/arch/arm/kernel.c > +++ b/xen/arch/arm/kernel.c > @@ -19,12 +19,12 @@ > #define KERNEL_FLASH_ADDRESS 0x00000000UL > #define KERNEL_FLASH_SIZE 0x00800000UL > > -#define ZIMAGE_MAGIC_OFFSET 0x24 > -#define ZIMAGE_START_OFFSET 0x28 > -#define ZIMAGE_END_OFFSET 0x2c > -#define ZIMAGE_HEADER_LEN 0x30 > +#define ZIMAGE32_MAGIC_OFFSET 0x24 > +#define ZIMAGE32_START_OFFSET 0x28 > +#define ZIMAGE32_END_OFFSET 0x2c > +#define ZIMAGE32_HEADER_LEN 0x30 > > -#define ZIMAGE_MAGIC 0x016f2818 > +#define ZIMAGE32_MAGIC 0x016f2818 > > struct minimal_dtb_header { > uint32_t magic; > @@ -99,26 +99,26 @@ static void kernel_zimage_load(struct kernel_info *info) > } > } > > -/** > - * Check the image is a zImage and return the load address and length > +/* > + * Check if the image is a 32-bit zImage and setup kernel_info > */ > -static int kernel_try_zimage_prepare(struct kernel_info *info, > +static int kernel_try_zimage32_prepare(struct kernel_info *info, > paddr_t addr, paddr_t size) > { > - uint32_t zimage[ZIMAGE_HEADER_LEN/4]; > + uint32_t zimage[ZIMAGE32_HEADER_LEN/4]; > uint32_t start, end; > struct minimal_dtb_header dtb_hdr; > > - if ( size < ZIMAGE_HEADER_LEN ) > + if ( size < ZIMAGE32_HEADER_LEN ) > return -EINVAL; > > copy_from_paddr(zimage, addr, sizeof(zimage), DEV_SHARED); > > - if (zimage[ZIMAGE_MAGIC_OFFSET/4] != ZIMAGE_MAGIC) > + if (zimage[ZIMAGE32_MAGIC_OFFSET/4] != ZIMAGE32_MAGIC) > return -EINVAL; > > - start = zimage[ZIMAGE_START_OFFSET/4]; > - end = zimage[ZIMAGE_END_OFFSET/4]; > + start = zimage[ZIMAGE32_START_OFFSET/4]; > + 
end = zimage[ZIMAGE32_END_OFFSET/4]; > > if ( (end - start) > size ) > return -EINVAL; > @@ -224,7 +224,7 @@ int kernel_prepare(struct kernel_info *info) > info->load_attr = BUFFERABLE; > } > > - rc = kernel_try_zimage_prepare(info, start, size); > + rc = kernel_try_zimage32_prepare(info, start, size); > if (rc < 0) > rc = kernel_try_elf_prepare(info, start, size); > > -- > 1.7.2.5 >
Stefano Stabellini
2013-Apr-25 18:26 UTC
Re: [PATCH 09/13] xen: arm: Set EL1 register width in HCR_EL2 during context switch.
On Wed, 17 Apr 2013, Ian Campbell wrote:> Signed-off-by: Ian Campbell <ian.campbell@citrix.com>I would be tempted to get rid of all these ifdef and just rely on the fact that on ARMv7 is_pv32_domain always returns 1.> xen/arch/arm/domain.c | 7 +++++++ > xen/include/asm-arm/processor.h | 3 +++ > 2 files changed, 10 insertions(+), 0 deletions(-) > > diff --git a/xen/arch/arm/domain.c b/xen/arch/arm/domain.c > index 08bb132..20bb6c7 100644 > --- a/xen/arch/arm/domain.c > +++ b/xen/arch/arm/domain.c > @@ -203,6 +203,13 @@ static void ctxt_switch_to(struct vcpu *n) > > isb(); > > +#ifdef CONFIG_ARM_64 > + if ( is_pv32_domain(n->domain) ) > + hcr &= ~HCR_RW; > + else > + hcr |= HCR_RW; > +#endif > + > WRITE_SYSREG(hcr, HCR_EL2); > isb(); > > diff --git a/xen/include/asm-arm/processor.h b/xen/include/asm-arm/processor.h > index 1681ebf..32c441b 100644 > --- a/xen/include/asm-arm/processor.h > +++ b/xen/include/asm-arm/processor.h > @@ -41,6 +41,9 @@ > #define HSCTLR_BASE 0x30c51878 > > /* HCR Hyp Configuration Register */ > +#ifdef CONFIG_ARM_64 > +#define HCR_RW (1<<31) > +#endif > #define HCR_TGE (1<<27) > #define HCR_TVM (1<<26) > #define HCR_TTLB (1<<25) > -- > 1.7.2.5 >
Stefano Stabellini
2013-Apr-25 18:32 UTC
Re: [PATCH 01/13] xen: arm: tweak arm64 stack frame layout
On Wed, 17 Apr 2013, Ian Campbell wrote:> Correct definition of UREGS_kernel_sizeof and use it. > > Correct adjustment of stack on entry and exit. > > Add 64-bit versions of the build time checks for stack pointer alignment > correctness when pushing the stack frames. > > Lastly, correct the padding in the stack frames to properly align the inner and > outer frames and also avoid an unnecessary 64bit padding field. > > Signed-off-by: Ian Campbell <ian.campbell@citrix.com>Acked-by: Stefano Stabellini <stefano.stabellini@eu.citrix.com>> xen/arch/arm/arm64/asm-offsets.c | 2 +- > xen/arch/arm/arm64/entry.S | 9 +++++---- > xen/arch/arm/domain.c | 2 ++ > xen/arch/arm/traps.c | 7 +++++++ > xen/include/asm-arm/arm64/processor.h | 7 +++---- > 5 files changed, 18 insertions(+), 9 deletions(-) > > diff --git a/xen/arch/arm/arm64/asm-offsets.c b/xen/arch/arm/arm64/asm-offsets.c > index 7949e3e..7544082 100644 > --- a/xen/arch/arm/arm64/asm-offsets.c > +++ b/xen/arch/arm/arm64/asm-offsets.c > @@ -39,7 +39,7 @@ void __dummy__(void) > OFFSET(UREGS_SP_el1, struct cpu_user_regs, sp_el1); > OFFSET(UREGS_ELR_el1, struct cpu_user_regs, elr_el1); > > - OFFSET(UREGS_kernel_sizeof, struct cpu_user_regs, cpsr); > + OFFSET(UREGS_kernel_sizeof, struct cpu_user_regs, spsr_el1); > DEFINE(UREGS_user_sizeof, sizeof(struct cpu_user_regs)); > BLANK(); > > diff --git a/xen/arch/arm/arm64/entry.S b/xen/arch/arm/arm64/entry.S > index 5656f45..b5af1e2 100644 > --- a/xen/arch/arm/arm64/entry.S > +++ b/xen/arch/arm/arm64/entry.S > @@ -35,7 +35,7 @@ lr .req x30 // link register > mrs x22, SP_el0 > str x22, [x21] > > - add x21, sp, #UREGS_ELR_el1 > + add x21, sp, #UREGS_SP_el1 > mrs x22, SP_el1 > mrs x23, ELR_el1 > stp x22, x23, [x21] > @@ -60,7 +60,7 @@ lr .req x30 // link register > * Save state on entry to hypervisor > */ > .macro entry, hyp, compat > - sub sp, sp, #(UREGS_SPSR_el1 - UREGS_SP) > + sub sp, sp, #(UREGS_SPSR_el1 - UREGS_LR) /* CPSR, PC, SP, LR */ > push x28, x29 > push x26, x27 > push 
x24, x25 > @@ -79,7 +79,7 @@ lr .req x30 // link register > > .if \hyp == 1 /* Hypervisor mode */ > > - add x21, sp, #(UREGS_X0 - UREGS_SP) > + add x21, sp, #UREGS_kernel_sizeof > > .else /* Guest mode */ > > @@ -214,7 +214,8 @@ ENTRY(return_to_hypervisor) > pop x26, x27 > pop x28, x29 > > - ldr lr, [sp], #(UREGS_SPSR_el1 - UREGS_SP) > + ldr lr, [sp], #(UREGS_SPSR_el1 - UREGS_LR) /* CPSR, PC, SP, LR */ > + > eret > > /* > diff --git a/xen/arch/arm/domain.c b/xen/arch/arm/domain.c > index f369871..08bb132 100644 > --- a/xen/arch/arm/domain.c > +++ b/xen/arch/arm/domain.c > @@ -419,6 +419,8 @@ int vcpu_initialise(struct vcpu *v) > { > int rc = 0; > > + BUILD_BUG_ON( sizeof(struct cpu_info) > STACK_SIZE ); > + > v->arch.stack = alloc_xenheap_pages(STACK_ORDER, MEMF_node(vcpu_to_node(v))); > if ( v->arch.stack == NULL ) > return -ENOMEM; > diff --git a/xen/arch/arm/traps.c b/xen/arch/arm/traps.c > index a98a45e..6b19bc5 100644 > --- a/xen/arch/arm/traps.c > +++ b/xen/arch/arm/traps.c > @@ -43,9 +43,16 @@ > * entry.S) and struct cpu_info (which lives at the bottom of a Xen > * stack) must be doubleword-aligned in size. 
*/ > static inline void check_stack_alignment_constraints(void) { > +#ifdef CONFIG_ARM_64 > + BUILD_BUG_ON((sizeof (struct cpu_user_regs)) & 0xf); > + BUILD_BUG_ON((offsetof(struct cpu_user_regs, spsr_el1)) & 0xf); > + BUILD_BUG_ON((offsetof(struct cpu_user_regs, lr)) & 0xf); > + BUILD_BUG_ON((sizeof (struct cpu_info)) & 0xf); > +#else > BUILD_BUG_ON((sizeof (struct cpu_user_regs)) & 0x7); > BUILD_BUG_ON((offsetof(struct cpu_user_regs, sp_usr)) & 0x7); > BUILD_BUG_ON((sizeof (struct cpu_info)) & 0x7); > +#endif > } > > static int debug_stack_lines = 20; > diff --git a/xen/include/asm-arm/arm64/processor.h b/xen/include/asm-arm/arm64/processor.h > index b4602fa..bf436c8 100644 > --- a/xen/include/asm-arm/arm64/processor.h > +++ b/xen/include/asm-arm/arm64/processor.h > @@ -51,6 +51,7 @@ struct cpu_user_regs > __DECL_REG(x27, r11_fiq); > __DECL_REG(x28, r12_fiq); > __DECL_REG(/* x29 */ fp, /* r13_fiq */ sp_fiq); > + > __DECL_REG(/* x30 */ lr, /* r14_fiq */ lr_fiq); > > register_t sp; /* Valid for hypervisor frames */ > @@ -59,7 +60,7 @@ struct cpu_user_regs > __DECL_REG(pc, pc32); /* ELR_EL2 */ > uint32_t cpsr; /* SPSR_EL2 */ > > - uint64_t pad0; > + uint32_t pad0; /* Align end of kernel frame. */ > > /* Outer guest frame only from here on... */ > > @@ -68,7 +69,7 @@ struct cpu_user_regs > uint32_t spsr_svc; /* AArch32 */ > }; > > - uint32_t pad1; /* Align */ > + uint32_t pad1; /* Doubleword-align the user half of the frame */ > > /* AArch32 guests only */ > uint32_t spsr_fiq, spsr_irq, spsr_und, spsr_abt; > @@ -76,8 +77,6 @@ struct cpu_user_regs > /* AArch64 guests only */ > uint64_t sp_el0; > uint64_t sp_el1, elr_el1; > - > - uint64_t pad2; /* Doubleword-align the user half of the frame */ > }; > > #undef __DECL_REG > -- > 1.7.2.5 >
Ian Campbell
2013-Apr-26 08:11 UTC
Re: [PATCH 09/13] xen: arm: Set EL1 register width in HCR_EL2 during context switch.
On Thu, 2013-04-25 at 19:26 +0100, Stefano Stabellini wrote:> On Wed, 17 Apr 2013, Ian Campbell wrote: > > Signed-off-by: Ian Campbell <ian.campbell@citrix.com> > > I would be tempted to get rid of all these ifdef and just rely on the > fact that on ARMv7 is_pv32_domain always returns 1.Yes, good idea.