This patch series implements SMP boot for arch/arm, as far as getting
all CPUs up and running the idle loop.

Changes from v1:
 - moved barriers out of loop in udelay()
 - dropped broken GIC change in favour of explanatory comment
 - made the increment of ready_cpus atomic (I couldn't move the
   increment to before signalling the next CPU because the PT switch
   has to happen between them)
 - made secondary CPUs come up directly on the idle vcpus' stacks,
   avoiding the whole business of allocating and tracking boot stacks.
 - added a new patch to do late-boot MMU fixups on secondary CPUs.

Cheers,

Tim.
# HG changeset patch
# User Tim Deegan <tim@xen.org>
# Date 1331300346 0
# Node ID 7a00e582027ed2793f4ee393f94ddf790b61c39f
# Parent  10c5ba0b5af2dd209d4f7d27649dea5827652d9c
arm: implement udelay()

Signed-off-by: Tim Deegan <tim@xen.org>

diff -r 10c5ba0b5af2 -r 7a00e582027e xen/arch/arm/dummy.S
--- a/xen/arch/arm/dummy.S	Fri Mar 09 09:58:41 2012 +0000
+++ b/xen/arch/arm/dummy.S	Fri Mar 09 13:39:06 2012 +0000
@@ -62,5 +62,4 @@ DUMMY(gmfn_to_mfn);
 DUMMY(hypercall_create_continuation);
 DUMMY(send_timer_event);
 DUMMY(share_xen_page_with_privileged_guests);
-DUMMY(__udelay);
 DUMMY(wallclock_time);
diff -r 10c5ba0b5af2 -r 7a00e582027e xen/arch/arm/time.c
--- a/xen/arch/arm/time.c	Fri Mar 09 09:58:41 2012 +0000
+++ b/xen/arch/arm/time.c	Fri Mar 09 13:39:06 2012 +0000
@@ -171,6 +171,16 @@ void __cpuinit init_timer_interrupt(void
     request_irq(30, timer_interrupt, 0, "phytimer", NULL);
 }
 
+/* Wait a set number of microseconds */
+void udelay(unsigned long usecs)
+{
+    s_time_t deadline = get_s_time() + 1000 * (s_time_t) usecs;
+    while ( get_s_time() - deadline < 0 )
+        ;
+    dsb();
+    isb();
+}
+
 /*
  * Local variables:
  * mode: C
diff -r 10c5ba0b5af2 -r 7a00e582027e xen/include/asm-arm/delay.h
--- a/xen/include/asm-arm/delay.h	Fri Mar 09 09:58:41 2012 +0000
+++ b/xen/include/asm-arm/delay.h	Fri Mar 09 13:39:06 2012 +0000
@@ -1,8 +1,7 @@
 #ifndef _ARM_DELAY_H
 #define _ARM_DELAY_H
 
-extern void __udelay(unsigned long usecs);
-#define udelay(n) __udelay(n)
+extern void udelay(unsigned long usecs);
 
 #endif /* defined(_ARM_DELAY_H) */
 /*
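Later patches in this series (__cpu_disable(), __cpu_die(), machine_restart())
call mdelay(), which the common header builds on top of this udelay().  A
sketch of that relationship, from memory of xen/include/xen/delay.h -- treat
the exact definition as an assumption, not part of this patch:

    /* Approximate shape of the common wrapper (illustration only). */
    #define mdelay(n) ({                    \
        unsigned long _msec = (n);          \
        while ( _msec-- )                   \
            udelay(1000);                   \
    })
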
Tim Deegan
2012-Mar-09 13:43 UTC
[PATCH 2 of 9] arm: Add a comment explaining the GICD writes in the GICC init function
# HG changeset patch
# User Tim Deegan <tim@xen.org>
# Date 1331300346 0
# Node ID 51da35a2e502779efb78402013da38a1960a6129
# Parent  7a00e582027ed2793f4ee393f94ddf790b61c39f
arm: Add a comment explaining the GICD writes in the GICC init function.

Signed-off-by: Tim Deegan <tim@xen.org>

diff -r 7a00e582027e -r 51da35a2e502 xen/arch/arm/gic.c
--- a/xen/arch/arm/gic.c	Fri Mar 09 13:39:06 2012 +0000
+++ b/xen/arch/arm/gic.c	Fri Mar 09 13:39:06 2012 +0000
@@ -224,7 +224,9 @@ static void __cpuinit gic_cpu_init(void)
 {
     int i;
 
-    /* Disable all PPI and enable all SGI */
+    /* The first 32 interrupts (PPI and SGI) are banked per-cpu, so
+     * even though they are controlled with GICD registers, they must
+     * be set up here with the other per-cpu state. */
     GICD[GICD_ICENABLER] = 0xffff0000; /* Disable all PPI */
     GICD[GICD_ISENABLER] = 0x0000ffff; /* Enable all SGI */
 
     /* Set PPI and SGI priorities */
# HG changeset patch # User Tim Deegan <tim@xen.org> # Date 1331300346 0 # Node ID ef66ce0f4fb3bfe72acd691e4db33f0c1ea9137d # Parent 51da35a2e502779efb78402013da38a1960a6129 arm: More SMP bringup Bring non-boot CPUs up as far as running on the relocated pagetables, one at a time, before the non-relocated copy of Xen gets reused for general memory pools. Don''t yet bring them up into C; that will happen later when stacks are allocated. Signed-off-by: Tim Deegan <tim@xen.org> diff -r 51da35a2e502 -r ef66ce0f4fb3 xen/arch/arm/gic.c --- a/xen/arch/arm/gic.c Fri Mar 09 13:39:06 2012 +0000 +++ b/xen/arch/arm/gic.c Fri Mar 09 13:39:06 2012 +0000 @@ -252,7 +252,7 @@ static void __cpuinit gic_hyp_init(void) } /* Set up the GIC */ -void gic_init(void) +int __init gic_init(void) { /* XXX FIXME get this from devicetree */ gic.dbase = GIC_BASE_ADDRESS + GIC_DR_OFFSET; @@ -274,6 +274,8 @@ void gic_init(void) gic_hyp_init(); spin_unlock(&gic.lock); + + return gic.cpus; } void gic_route_irqs(void) diff -r 51da35a2e502 -r ef66ce0f4fb3 xen/arch/arm/gic.h --- a/xen/arch/arm/gic.h Fri Mar 09 13:39:06 2012 +0000 +++ b/xen/arch/arm/gic.h Fri Mar 09 13:39:06 2012 +0000 @@ -138,8 +138,8 @@ extern int gic_route_irq_to_guest(struct /* Accept an interrupt from the GIC and dispatch its handler */ extern void gic_interrupt(struct cpu_user_regs *regs, int is_fiq); -/* Bring up the interrupt controller */ -extern void gic_init(void); +/* Bring up the interrupt controller, and report # cpus attached */ +extern int gic_init(void); /* setup the gic virtual interface for a guest */ extern void gicv_setup(struct domain *d); #endif diff -r 51da35a2e502 -r ef66ce0f4fb3 xen/arch/arm/head.S --- a/xen/arch/arm/head.S Fri Mar 09 13:39:06 2012 +0000 +++ b/xen/arch/arm/head.S Fri Mar 09 13:39:06 2012 +0000 @@ -62,22 +62,36 @@ start: #endif /* Are we the boot CPU? */ + mov r12, #0 /* r12 := CPU ID */ mrc CP32(r0, MPIDR) tst r0, #(1<<31) /* Multiprocessor extension supported? */ beq boot_cpu tst r0, #(1<<30) /* Uniprocessor system? */ bne boot_cpu - bics r0, r0, #(0xff << 24) /* Ignore flags */ - beq boot_cpu /* If all other fields are 0, we win */ + bics r12, r0, #(0xff << 24) /* Mask out flags to get CPU ID */ + beq boot_cpu /* If we''re CPU 0, boot now */ -1: wfi - b 1b - + /* Non-boot CPUs wait here to be woken up one at a time. + * This is basically an open-coded spin-lock to serialize. 
*/ + ldr r0, =boot_gate /* VA of gate */ + add r0, r0, r10 /* PA of gate */ + mov r1, #1 /* (1 == locked) */ +1: wfe + ldrex r2, [r0] /* Linked read of current value */ + teq r2, #0 /* (0 == unlocked) */ + strexeq r2, r1, [r0] /* Matching update -> locked */ + teq r2, #0 /* (0 == succeeded) */ + bne 1b + boot_cpu: #ifdef EARLY_UART_ADDRESS - /* Say hello */ ldr r11, =EARLY_UART_ADDRESS /* r11 := UART base address */ - bl init_uart + teq r12, #0 /* CPU 0 sets up the UART too */ + bleq init_uart + PRINT("- CPU ") + mov r0, r12 + bl putn + PRINT(" booting -\r\n") #endif /* Check that this CPU has Hyp mode */ @@ -85,7 +99,6 @@ boot_cpu: and r0, r0, #0xf000 /* Bits 12-15 define virt extensions */ teq r0, #0x1000 /* Must == 0x1 or may be incompatible */ beq 1f - bl putn PRINT("- CPU doesn''t support the virtualization extensions -\r\n") b fail 1: @@ -185,6 +198,10 @@ hyp: mov r5, #0 /* r4:r5 is paddr (xen_pagetable) */ mcrr CP64(r4, r5, HTTBR) + /* Non-boot CPUs don''t need to rebuild the pagetable */ + teq r12, #0 + bne pt_ready + /* Build the baseline idle pagetable''s first-level entries */ ldr r1, =xen_second add r1, r1, r10 /* r1 := paddr (xen_second) */ @@ -226,6 +243,7 @@ hyp: add r4, r4, #8 strd r2, r3, [r1, r4] /* Map it in the early boot slot */ +pt_ready: PRINT("- Turning on paging -\r\n") ldr r1, =paging /* Explicit vaddr, not RIP-relative */ @@ -238,7 +256,7 @@ hyp: paging: #ifdef EARLY_UART_ADDRESS - /* Recover the UART address in the new address space */ + /* Recover the UART address in the new address space. */ lsl r11, #11 lsr r11, #11 /* UART base''s offset from 2MB base */ adr r0, start @@ -246,14 +264,59 @@ paging: add r11, r11, r0 /* r11 := vaddr (UART base address) */ #endif - PRINT("- Entering C -\r\n") + PRINT("- Ready -\r\n") + /* The boot CPU should go straight into C now */ + teq r12, #0 + beq launch + + /* Signal the next non-boot CPU to come and join us here */ + ldr r0, =boot_gate /* VA of gate */ + add r0, r0, r10 /* PA of gate */ + mov r1, #0 /* (0 == unlocked) */ + str r1, [r0] + dsb + isb + sev + + /* Move on to the relocated pagetables */ + mov r0, #0 + ldr r4, =boot_httbr /* VA of HTTBR value stashed by CPU 0 */ + add r4, r4, r10 /* PA of it */ + ldrd r4, r5, [r4] /* Actual value */ + mcrr CP64(r4, r5, HTTBR) + mcr CP32(r0, TLBIALLH) /* Flush hypervisor TLB */ + mcr CP32(r0, BPIALL) /* Flush branch predictor */ + dsb /* Ensure completion of TLB+BP flush */ + isb + /* Now, the UART is in its proper fixmap address */ + ldrne r11, =FIXMAP_ADDR(FIXMAP_CONSOLE) + + /* Non-boot CPUs report that they''ve got this far */ + ldr r0, =ready_cpus +1: ldrex r1, [r0] /* { read # of ready CPUs } */ + add r1, r1, #1 /* Atomically { ++ } */ + strex r2, r1, [r0] /* { writeback } */ + teq r2, #0 + bne 1b + dsb + + /* Here, the non-boot CPUs must wait again -- they''re now running on + * the boot CPU''s pagetables so it''s safe for the boot CPU to + * overwrite the non-relocated copy of Xen. Once it''s done that, + * and brought up the memory allocator, non-boot CPUs can get their + * own stacks and enter C. */ +1: wfe + b 1b + +launch: ldr sp, =init_stack /* Supply a stack */ add sp, #STACK_SIZE /* (which grows down from the top). 
*/ sub sp, #CPUINFO_sizeof /* Make room for CPU save record */ mov r0, r10 /* Marshal args: - phys_offset */ mov r1, r7 /* - machine type */ mov r2, r8 /* - ATAG address */ + mov r3, r12 /* - CPU ID */ b start_xen /* and disappear into the land of C */ /* Fail-stop @@ -288,7 +351,7 @@ puts: tst r2, #0x8 /* Check BUSY bit */ bne puts /* Wait for the UART to be ready */ ldrb r2, [r0], #1 /* Load next char */ - teq r2, #0 /* Exit on nul*/ + teq r2, #0 /* Exit on nul */ moveq pc, lr str r2, [r11] /* -> UARTDR (Data Register) */ b puts @@ -308,10 +371,8 @@ 1: ldr r2, [r11, #0x18] /* <- UA lsl r0, #4 /* Roll it through one nybble at a time */ subs r3, r3, #1 bne 1b - adr r0, crlf /* Finish with a newline */ - b puts + mov pc, lr -crlf: .asciz "\r\n" hex: .ascii "0123456789abcdef" .align 2 diff -r 51da35a2e502 -r ef66ce0f4fb3 xen/arch/arm/mm.c --- a/xen/arch/arm/mm.c Fri Mar 09 13:39:06 2012 +0000 +++ b/xen/arch/arm/mm.c Fri Mar 09 13:39:06 2012 +0000 @@ -36,6 +36,9 @@ lpae_t xen_second[LPAE_ENTRIES*4] __attr static lpae_t xen_fixmap[LPAE_ENTRIES] __attribute__((__aligned__(4096))); static lpae_t xen_xenmap[LPAE_ENTRIES] __attribute__((__aligned__(4096))); +/* Non-boot CPUs use this to find the correct pagetables. */ +uint64_t boot_httbr; + /* Limits of the Xen heap */ unsigned long xenheap_mfn_start, xenheap_mfn_end; unsigned long xenheap_virt_end; @@ -156,14 +159,6 @@ void __init setup_pagetables(unsigned lo lpae_t pte, *p; int i; - if ( boot_phys_offset != 0 ) - { - /* Remove the old identity mapping of the boot paddr */ - pte.bits = 0; - dest_va = (unsigned long)_start + boot_phys_offset; - write_pte(xen_second + second_linear_offset(dest_va), pte); - } - xen_paddr = device_tree_get_xen_paddr(); /* Map the destination in the boot misc area. */ @@ -186,11 +181,18 @@ void __init setup_pagetables(unsigned lo for ( i = 0; i < 4; i++) p[i].pt.base += (phys_offset - boot_phys_offset) >> PAGE_SHIFT; p = (void *) xen_second + dest_va - (unsigned long) _start; + if ( boot_phys_offset != 0 ) + { + /* Remove the old identity mapping of the boot paddr */ + unsigned long va = (unsigned long)_start + boot_phys_offset; + p[second_linear_offset(va)].bits = 0; + } for ( i = 0; i < 4 * LPAE_ENTRIES; i++) if ( p[i].pt.valid ) p[i].pt.base += (phys_offset - boot_phys_offset) >> PAGE_SHIFT; /* Change pagetables to the copy in the relocated Xen */ + boot_httbr = (unsigned long) xen_pgtable + phys_offset; asm volatile ( STORE_CP64(0, HTTBR) /* Change translation base */ "dsb;" /* Ensure visibility of HTTBR update */ @@ -198,7 +200,7 @@ void __init setup_pagetables(unsigned lo STORE_CP32(0, BPIALL) /* Flush branch predictor */ "dsb;" /* Ensure completion of TLB+BP flush */ "isb;" - : : "r" ((unsigned long) xen_pgtable + phys_offset) : "memory"); + : : "r" (boot_httbr) : "memory"); /* Undo the temporary map */ pte.bits = 0; diff -r 51da35a2e502 -r ef66ce0f4fb3 xen/arch/arm/setup.c --- a/xen/arch/arm/setup.c Fri Mar 09 13:39:06 2012 +0000 +++ b/xen/arch/arm/setup.c Fri Mar 09 13:39:06 2012 +0000 @@ -44,7 +44,12 @@ static unsigned int __initdata max_cpus /* Xen stack for bringing up the first CPU. 
*/ unsigned char __initdata init_stack[STACK_SIZE] __attribute__((__aligned__(STACK_SIZE))); -extern char __init_begin[], __init_end[], __bss_start[]; +extern const char __init_begin[], __init_end[], __bss_start[]; + +/* Spinlock for serializing CPU bringup */ +unsigned long __initdata boot_gate = 1; +/* Number of non-boot CPUs ready to enter C */ +unsigned long __initdata ready_cpus = 0; static __attribute_used__ void init_done(void) { @@ -151,14 +156,17 @@ static void __init setup_mm(unsigned lon end_boot_allocator(); } +/* C entry point for boot CPU */ void __init start_xen(unsigned long boot_phys_offset, unsigned long arm_type, - unsigned long atag_paddr) - + unsigned long atag_paddr, + unsigned long cpuid) { void *fdt; size_t fdt_size; - int i; + int cpus, i; + paddr_t gate_pa; + unsigned long *gate; fdt = (void *)BOOT_MISC_VIRT_START + (atag_paddr & ((1 << SECOND_SHIFT) - 1)); @@ -174,14 +182,26 @@ void __init start_xen(unsigned long boot console_init_preirq(); #endif + cpus = gic_init(); + + printk("Waiting for %i other CPUs to be ready\n", cpus - 1); + /* Bring the other CPUs up to paging before the original + * copy of .text gets overwritten. We need to use the unrelocated + * copy of boot_gate as that''s the one the others can see. */ + gate_pa = ((unsigned long) &boot_gate) + boot_phys_offset; + gate = map_domain_page(gate_pa >> PAGE_SHIFT) + (gate_pa & ~PAGE_MASK); + *gate = 0; + unmap_domain_page(gate); + /* Now send an event to wake the first non-boot CPU */ + asm volatile("dsb; isb; sev"); + /* And wait for them all to be ready. */ + while ( ready_cpus + 1 < cpus ) + smp_rmb(); + __set_current((struct vcpu *)0xfffff000); /* debug sanity */ idle_vcpu[0] = current; set_processor_id(0); /* needed early, for smp_processor_id() */ - /* TODO: smp_prepare_boot_cpu(void) */ - cpumask_set_cpu(smp_processor_id(), &cpu_online_map); - cpumask_set_cpu(smp_processor_id(), &cpu_present_map); - smp_prepare_cpus(max_cpus); init_xen_time(); @@ -208,8 +228,6 @@ void __init start_xen(unsigned long boot init_IRQ(); - gic_init(); - gic_route_irqs(); init_maintenance_interrupt();
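As an aside for readers who don't speak ARM assembly: the boot_gate above is
an open-coded spin-lock.  Secondary CPUs sleep in wfe and, when woken, try to
claim the gate with a load-exclusive/store-exclusive (ldrex/strex) pair;
whoever holds the gate releases it by storing 0 and issuing sev.  A rough C
equivalent, using GCC atomic builtins purely for illustration -- the real
code has to stay in assembly because at this point the secondaries have no
stack and no C environment:

    /* Sketch only: mirrors the head.S gate, not part of the patch. */
    static volatile unsigned long gate = 1;          /* 1 == locked */

    static void secondary_wait_turn(void)            /* head.S: the "1: wfe" loop */
    {
        for ( ;; )
        {
            asm volatile("wfe" ::: "memory");        /* sleep until an event (sev) */
            if ( __atomic_exchange_n(&gate, 1, __ATOMIC_ACQUIRE) == 0 )
                return;                              /* gate was open: our turn to boot */
        }
    }

    static void let_next_cpu_in(void)                /* head.S / start_xen(): release */
    {
        __atomic_store_n(&gate, 0, __ATOMIC_RELEASE);
        asm volatile("dsb; isb; sev" ::: "memory");  /* make the store visible, then wake */
    }
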
# HG changeset patch # User Tim Deegan <tim@xen.org> # Date 1331300346 0 # Node ID ded775f8a1278c98d8341338b47df0700992e01b # Parent ef66ce0f4fb3bfe72acd691e4db33f0c1ea9137d arm: per-cpu areas Signed-off-by: Tim Deegan <tim@xen.org> Acked-by: Ian Campbell <ian.campbell@citrix.com> diff -r ef66ce0f4fb3 -r ded775f8a127 xen/arch/arm/Makefile --- a/xen/arch/arm/Makefile Fri Mar 09 13:39:06 2012 +0000 +++ b/xen/arch/arm/Makefile Fri Mar 09 13:39:06 2012 +0000 @@ -13,6 +13,7 @@ obj-y += irq.o obj-y += kernel.o obj-y += mm.o obj-y += p2m.o +obj-y += percpu.o obj-y += guestcopy.o obj-y += setup.o obj-y += time.o diff -r ef66ce0f4fb3 -r ded775f8a127 xen/arch/arm/dummy.S --- a/xen/arch/arm/dummy.S Fri Mar 09 13:39:06 2012 +0000 +++ b/xen/arch/arm/dummy.S Fri Mar 09 13:39:06 2012 +0000 @@ -14,7 +14,6 @@ DUMMY(per_cpu__cpu_core_mask); DUMMY(per_cpu__cpu_sibling_mask); DUMMY(node_online_map); DUMMY(smp_send_state_dump); -DUMMY(__per_cpu_offset); /* PIRQ support */ DUMMY(alloc_pirq_struct); diff -r ef66ce0f4fb3 -r ded775f8a127 xen/arch/arm/percpu.c --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/xen/arch/arm/percpu.c Fri Mar 09 13:39:06 2012 +0000 @@ -0,0 +1,85 @@ +#include <xen/config.h> +#include <xen/percpu.h> +#include <xen/cpu.h> +#include <xen/init.h> +#include <xen/mm.h> +#include <xen/rcupdate.h> + +unsigned long __per_cpu_offset[NR_CPUS]; +#define INVALID_PERCPU_AREA (-(long)__per_cpu_start) +#define PERCPU_ORDER (get_order_from_bytes(__per_cpu_data_end-__per_cpu_start)) + +void __init percpu_init_areas(void) +{ + unsigned int cpu; + for ( cpu = 1; cpu < NR_CPUS; cpu++ ) + __per_cpu_offset[cpu] = INVALID_PERCPU_AREA; +} + +static int init_percpu_area(unsigned int cpu) +{ + char *p; + if ( __per_cpu_offset[cpu] != INVALID_PERCPU_AREA ) + return -EBUSY; + if ( (p = alloc_xenheap_pages(PERCPU_ORDER, 0)) == NULL ) + return -ENOMEM; + memset(p, 0, __per_cpu_data_end - __per_cpu_start); + __per_cpu_offset[cpu] = p - __per_cpu_start; + return 0; +} + +struct free_info { + unsigned int cpu; + struct rcu_head rcu; +}; +static DEFINE_PER_CPU(struct free_info, free_info); + +static void _free_percpu_area(struct rcu_head *head) +{ + struct free_info *info = container_of(head, struct free_info, rcu); + unsigned int cpu = info->cpu; + char *p = __per_cpu_start + __per_cpu_offset[cpu]; + free_xenheap_pages(p, PERCPU_ORDER); + __per_cpu_offset[cpu] = INVALID_PERCPU_AREA; +} + +static void free_percpu_area(unsigned int cpu) +{ + struct free_info *info = &per_cpu(free_info, cpu); + info->cpu = cpu; + call_rcu(&info->rcu, _free_percpu_area); +} + +static int cpu_percpu_callback( + struct notifier_block *nfb, unsigned long action, void *hcpu) +{ + unsigned int cpu = (unsigned long)hcpu; + int rc = 0; + + switch ( action ) + { + case CPU_UP_PREPARE: + rc = init_percpu_area(cpu); + break; + case CPU_UP_CANCELED: + case CPU_DEAD: + free_percpu_area(cpu); + break; + default: + break; + } + + return !rc ? 
NOTIFY_DONE : notifier_from_errno(rc); +} + +static struct notifier_block cpu_percpu_nfb = { + .notifier_call = cpu_percpu_callback, + .priority = 100 /* highest priority */ +}; + +static int __init percpu_presmp_init(void) +{ + register_cpu_notifier(&cpu_percpu_nfb); + return 0; +} +presmp_initcall(percpu_presmp_init); diff -r ef66ce0f4fb3 -r ded775f8a127 xen/include/asm-arm/percpu.h --- a/xen/include/asm-arm/percpu.h Fri Mar 09 13:39:06 2012 +0000 +++ b/xen/include/asm-arm/percpu.h Fri Mar 09 13:39:06 2012 +0000 @@ -12,8 +12,11 @@ void percpu_init_areas(void); __attribute__((__section__(".bss.percpu" #suffix))) \ __typeof__(type) per_cpu_##name -#define per_cpu(var, cpu) ((&per_cpu__##var)[cpu?0:0]) -#define __get_cpu_var(var) per_cpu__##var + +#define per_cpu(var, cpu) \ + (*RELOC_HIDE(&per_cpu__##var, __per_cpu_offset[cpu])) +#define __get_cpu_var(var) \ + (*RELOC_HIDE(&per_cpu__##var, get_cpu_info()->per_cpu_offset)) #define DECLARE_PER_CPU(type, name) extern __typeof__(type) per_cpu__##name
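With these macros in place, per-CPU data can be declared and used much as on
x86.  A minimal usage sketch: the variable name is invented, and
DEFINE_PER_CPU/this_cpu are assumed to be the common wrappers from
xen/include/xen/percpu.h behaving as they do on x86:

    /* Usage sketch only -- 'boot_count' is a made-up example variable. */
    DEFINE_PER_CPU(unsigned long, boot_count);

    static void bump_this_cpu(void)
    {
        this_cpu(boot_count)++;            /* resolves via get_cpu_info()->per_cpu_offset */
    }

    static unsigned long read_cpu(unsigned int cpu)
    {
        return per_cpu(boot_count, cpu);   /* resolves via __per_cpu_offset[cpu] */
    }
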
# HG changeset patch # User Tim Deegan <tim@xen.org> # Date 1331300346 0 # Node ID 469d9fc8e755b8bd0de386c8cd505229ecdf061f # Parent ded775f8a1278c98d8341338b47df0700992e01b arm: start plumbing in SMP bringup in C Still a noop, but no longer just a dummy symbol. Signed-off-by: Tim Deegan <tim@xen.org> Acked-by: Ian Campbell <ian.campbell@citrix.com> diff -r ded775f8a127 -r 469d9fc8e755 xen/arch/arm/dummy.S --- a/xen/arch/arm/dummy.S Fri Mar 09 13:39:06 2012 +0000 +++ b/xen/arch/arm/dummy.S Fri Mar 09 13:39:06 2012 +0000 @@ -7,9 +7,6 @@ x: .word 0xe7f000f0 /* Undefined instruc x: mov pc, lr /* SMP support */ -DUMMY(__cpu_die); -DUMMY(__cpu_disable); -DUMMY(__cpu_up); DUMMY(per_cpu__cpu_core_mask); DUMMY(per_cpu__cpu_sibling_mask); DUMMY(node_online_map); diff -r ded775f8a127 -r 469d9fc8e755 xen/arch/arm/setup.c --- a/xen/arch/arm/setup.c Fri Mar 09 13:39:06 2012 +0000 +++ b/xen/arch/arm/setup.c Fri Mar 09 13:39:06 2012 +0000 @@ -38,9 +38,6 @@ #include <asm/setup.h> #include "gic.h" -/* maxcpus: maximum number of CPUs to activate. */ -static unsigned int __initdata max_cpus = NR_CPUS; - /* Xen stack for bringing up the first CPU. */ unsigned char __initdata init_stack[STACK_SIZE] __attribute__((__aligned__(STACK_SIZE))); @@ -202,7 +199,7 @@ void __init start_xen(unsigned long boot idle_vcpu[0] = current; set_processor_id(0); /* needed early, for smp_processor_id() */ - smp_prepare_cpus(max_cpus); + smp_prepare_cpus(cpus); init_xen_time(); @@ -249,7 +246,7 @@ void __init start_xen(unsigned long boot for_each_present_cpu ( i ) { - if ( (num_online_cpus() < max_cpus) && !cpu_online(i) ) + if ( (num_online_cpus() < cpus) && !cpu_online(i) ) { int ret = cpu_up(i); if ( ret != 0 ) diff -r ded775f8a127 -r 469d9fc8e755 xen/arch/arm/smpboot.c --- a/xen/arch/arm/smpboot.c Fri Mar 09 13:39:06 2012 +0000 +++ b/xen/arch/arm/smpboot.c Fri Mar 09 13:39:06 2012 +0000 @@ -19,6 +19,7 @@ #include <xen/cpumask.h> #include <xen/smp.h> #include <xen/init.h> +#include <xen/errno.h> cpumask_t cpu_online_map; EXPORT_SYMBOL(cpu_online_map); @@ -30,16 +31,40 @@ EXPORT_SYMBOL(cpu_possible_map); void __init smp_prepare_cpus (unsigned int max_cpus) { - set_processor_id(0); /* needed early, for smp_processor_id() */ + int i; + set_processor_id(0); /* needed early, for smp_processor_id() */ - cpumask_clear(&cpu_online_map); - cpumask_clear(&cpu_present_map); - cpumask_clear(&cpu_possible_map); - cpumask_set_cpu(0, &cpu_online_map); - cpumask_set_cpu(0, &cpu_present_map); - cpumask_set_cpu(0, &cpu_possible_map); - return; + cpumask_clear(&cpu_online_map); + cpumask_set_cpu(0, &cpu_online_map); + + cpumask_clear(&cpu_possible_map); + for ( i = 0; i < max_cpus; i++ ) + cpumask_set_cpu(i, &cpu_possible_map); + cpumask_copy(&cpu_present_map, &cpu_possible_map); } + +/* Bring up a non-boot CPU */ +int __cpu_up(unsigned int cpu) +{ + /* Not yet... */ + return -ENODEV; +} + +/* Shut down the current CPU */ +void __cpu_disable(void) +{ + /* TODO: take down timers, GIC, &c. */ + BUG(); +} + +/* Wait for a remote CPU to die */ +void __cpu_die(unsigned int cpu) +{ + /* TODO: interlock with __cpu_disable */ + BUG(); +} + + /* * Local variables: * mode: C
# HG changeset patch # User Tim Deegan <tim@xen.org> # Date 1331300346 0 # Node ID 1b26b83318b378a6e41916e230e27030ee4539e0 # Parent 469d9fc8e755b8bd0de386c8cd505229ecdf061f arm: Boot secondary CPUs into C Secondary CPUs come up directly onto the stack of the appropriate idle vcpu; the boot CPU starts on a statically allocated stack and switches over to the idle vcpu''s one once the idle domain has been built. Signed-off-by: Tim Deegan <tim@xen.org> diff -r 469d9fc8e755 -r 1b26b83318b3 xen/arch/arm/gic.c --- a/xen/arch/arm/gic.c Fri Mar 09 13:39:06 2012 +0000 +++ b/xen/arch/arm/gic.c Fri Mar 09 13:39:06 2012 +0000 @@ -245,7 +245,6 @@ static void __cpuinit gic_hyp_init(void) vtr = GICH[GICH_VTR]; nr_lrs = (vtr & GICH_VTR_NRLRGS) + 1; - printk("GICH: %d list registers available\n", nr_lrs); GICH[GICH_HCR] = GICH_HCR_EN; GICH[GICH_MISR] = GICH_MISR_EOI; @@ -278,6 +277,15 @@ int __init gic_init(void) return gic.cpus; } +/* Set up the per-CPU parts of the GIC for a secondary CPU */ +void __cpuinit gic_init_secondary_cpu(void) +{ + spin_lock(&gic.lock); + gic_cpu_init(); + gic_hyp_init(); + spin_unlock(&gic.lock); +} + void gic_route_irqs(void) { /* XXX should get these from DT */ diff -r 469d9fc8e755 -r 1b26b83318b3 xen/arch/arm/gic.h --- a/xen/arch/arm/gic.h Fri Mar 09 13:39:06 2012 +0000 +++ b/xen/arch/arm/gic.h Fri Mar 09 13:39:06 2012 +0000 @@ -140,6 +140,8 @@ extern int gic_route_irq_to_guest(struct extern void gic_interrupt(struct cpu_user_regs *regs, int is_fiq); /* Bring up the interrupt controller, and report # cpus attached */ extern int gic_init(void); +/* Bring up a secondary CPU''s per-CPU GIC interface */ +extern void gic_init_secondary_cpu(void); /* setup the gic virtual interface for a guest */ extern void gicv_setup(struct domain *d); #endif diff -r 469d9fc8e755 -r 1b26b83318b3 xen/arch/arm/head.S --- a/xen/arch/arm/head.S Fri Mar 09 13:39:06 2012 +0000 +++ b/xen/arch/arm/head.S Fri Mar 09 13:39:06 2012 +0000 @@ -307,17 +307,23 @@ 1: ldrex r1, [r0] /* * and brought up the memory allocator, non-boot CPUs can get their * own stacks and enter C. */ 1: wfe - b 1b + dsb + ldr r0, =smp_up_cpu + ldr r1, [r0] /* Which CPU is being booted? */ + teq r1, r12 /* Is it us? */ + bne 1b launch: - ldr sp, =init_stack /* Supply a stack */ + ldr r0, =init_stack /* Find the boot-time stack */ + ldr sp, [r0] add sp, #STACK_SIZE /* (which grows down from the top). */ sub sp, #CPUINFO_sizeof /* Make room for CPU save record */ mov r0, r10 /* Marshal args: - phys_offset */ mov r1, r7 /* - machine type */ mov r2, r8 /* - ATAG address */ - mov r3, r12 /* - CPU ID */ - b start_xen /* and disappear into the land of C */ + movs r3, r12 /* - CPU ID */ + beq start_xen /* and disappear into the land of C */ + b start_secondary /* (to the appropriate entry point) */ /* Fail-stop * r0: string explaining why */ diff -r 469d9fc8e755 -r 1b26b83318b3 xen/arch/arm/setup.c --- a/xen/arch/arm/setup.c Fri Mar 09 13:39:06 2012 +0000 +++ b/xen/arch/arm/setup.c Fri Mar 09 13:39:06 2012 +0000 @@ -38,9 +38,6 @@ #include <asm/setup.h> #include "gic.h" -/* Xen stack for bringing up the first CPU. 
*/ -unsigned char __initdata init_stack[STACK_SIZE] __attribute__((__aligned__(STACK_SIZE))); - extern const char __init_begin[], __init_end[], __bss_start[]; /* Spinlock for serializing CPU bringup */ @@ -179,6 +176,8 @@ void __init start_xen(unsigned long boot console_init_preirq(); #endif + percpu_init_areas(); + cpus = gic_init(); printk("Waiting for %i other CPUs to be ready\n", cpus - 1); @@ -197,7 +196,6 @@ void __init start_xen(unsigned long boot __set_current((struct vcpu *)0xfffff000); /* debug sanity */ idle_vcpu[0] = current; - set_processor_id(0); /* needed early, for smp_processor_id() */ smp_prepare_cpus(cpus); @@ -284,7 +282,11 @@ void __init start_xen(unsigned long boot domain_unpause_by_systemcontroller(dom0); - reset_stack_and_jump(init_done); + /* Switch on to the dynamically allocated stack for the idle vcpu + * since the static one we''re running on is about to be freed. */ + memcpy(idle_vcpu[0]->arch.cpu_info, get_cpu_info(), + sizeof(struct cpu_info)); + switch_stack_and_jump(idle_vcpu[0]->arch.cpu_info, init_done); } void arch_get_xen_caps(xen_capabilities_info_t *info) diff -r 469d9fc8e755 -r 1b26b83318b3 xen/arch/arm/smpboot.c --- a/xen/arch/arm/smpboot.c Fri Mar 09 13:39:06 2012 +0000 +++ b/xen/arch/arm/smpboot.c Fri Mar 09 13:39:06 2012 +0000 @@ -16,10 +16,15 @@ * GNU General Public License for more details. */ +#include <xen/cpu.h> #include <xen/cpumask.h> +#include <xen/errno.h> +#include <xen/init.h> +#include <xen/mm.h> +#include <xen/sched.h> #include <xen/smp.h> -#include <xen/init.h> -#include <xen/errno.h> +#include <xen/softirq.h> +#include "gic.h" cpumask_t cpu_online_map; EXPORT_SYMBOL(cpu_online_map); @@ -28,6 +33,13 @@ EXPORT_SYMBOL(cpu_online_map); cpumask_t cpu_possible_map; EXPORT_SYMBOL(cpu_possible_map); +/* Xen stack for bringing up the first CPU. */ +static unsigned char __initdata cpu0_boot_stack[STACK_SIZE] + __attribute__((__aligned__(STACK_SIZE))); + +/* Pointer to the stack, used by head.S when entering C */ +unsigned char *init_stack = cpu0_boot_stack; + void __init smp_prepare_cpus (unsigned int max_cpus) { @@ -43,11 +55,43 @@ smp_prepare_cpus (unsigned int max_cpus) cpumask_copy(&cpu_present_map, &cpu_possible_map); } -/* Bring up a non-boot CPU */ -int __cpu_up(unsigned int cpu) +/* Shared state for coordinating CPU bringup */ +unsigned long smp_up_cpu = 0; + +/* Boot the current CPU */ +void __cpuinit start_secondary(unsigned long boot_phys_offset, + unsigned long arm_type, + unsigned long atag_paddr, + unsigned long cpuid) { - /* Not yet... */ - return -ENODEV; + memset(get_cpu_info(), 0, sizeof (struct cpu_info)); + + /* TODO: handle boards where CPUIDs are not contiguous */ + set_processor_id(cpuid); + + /* Setup Hyp vector base */ + WRITE_CP32((uint32_t) hyp_traps_vector, HVBAR); + + dprintk(XENLOG_DEBUG, "CPU %li awake.\n", cpuid); + + gic_init_secondary_cpu(); + + set_current(idle_vcpu[cpuid]); + this_cpu(curr_vcpu) = current; + + /* Run local notifiers */ + notify_cpu_starting(cpuid); + wmb(); + + /* Now report this CPU is up */ + cpumask_set_cpu(cpuid, &cpu_online_map); + wmb(); + + local_irq_enable(); + + dprintk(XENLOG_DEBUG, "CPU %li booted.\n", cpuid); + + startup_cpu_idle_loop(); } /* Shut down the current CPU */ @@ -57,6 +101,26 @@ void __cpu_disable(void) BUG(); } +/* Bring up a remote CPU */ +int __cpu_up(unsigned int cpu) +{ + /* Tell the remote CPU which stack to boot on. */ + init_stack = idle_vcpu[cpu]->arch.stack; + + /* Unblock the CPU. 
It should be waiting in the loop in head.S + * for an event to arrive when smp_up_cpu matches its cpuid. */ + smp_up_cpu = cpu; + asm volatile("dsb; isb; sev"); + + while ( !cpu_online(cpu) ) + { + cpu_relax(); + process_pending_softirqs(); + } + + return 0; +} + /* Wait for a remote CPU to die */ void __cpu_die(unsigned int cpu) { diff -r 469d9fc8e755 -r 1b26b83318b3 xen/include/asm-arm/current.h --- a/xen/include/asm-arm/current.h Fri Mar 09 13:39:06 2012 +0000 +++ b/xen/include/asm-arm/current.h Fri Mar 09 13:39:06 2012 +0000 @@ -47,10 +47,10 @@ static inline struct cpu_info *get_cpu_i #define guest_cpu_user_regs() (&get_cpu_info()->guest_cpu_user_regs) -#define reset_stack_and_jump(__fn) \ - __asm__ __volatile__ ( \ - "mov sp,%0; b "STR(__fn) \ - : : "r" (guest_cpu_user_regs()) : "memory" ) +#define switch_stack_and_jump(stack, fn) \ + asm volatile ("mov sp,%0; b " STR(fn) : : "r" (stack) : "memory" ) + +#define reset_stack_and_jump(fn) switch_stack_and_jump(get_cpu_info(), fn) #endif
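The handoff between __cpu_up() and the head.S wait loop is easiest to see
side by side; the following is a rough C restatement for orientation only
(the secondary's half really runs in assembly, before it has a stack, and the
function names here are invented for the sketch):

    extern unsigned char *init_stack;                /* patch: stack pointer for head.S */
    extern unsigned long smp_up_cpu;                 /* patch: whose turn it is to boot  */

    /* Boot CPU (__cpu_up(), simplified): point the newcomer at its stack,
     * publish whose turn it is, then wake all sleepers with sev. */
    static void kick_cpu_sketch(unsigned int cpu, unsigned char *idle_stack)
    {
        init_stack = idle_stack;
        smp_up_cpu = cpu;
        asm volatile("dsb; isb; sev" ::: "memory");
    }

    /* Secondary CPU (the head.S loop, restated): sleep until it is our turn,
     * then pick up init_stack and branch to start_secondary(). */
    static void wait_our_turn_sketch(unsigned long cpuid)
    {
        while ( *(volatile unsigned long *)&smp_up_cpu != cpuid )
            asm volatile("wfe" ::: "memory");
        asm volatile("dsb" ::: "memory");            /* order reads after wakeup */
    }
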
# HG changeset patch # User Tim Deegan <tim@xen.org> # Date 1331300346 0 # Node ID 80b1ce050d104d9bb2ea41634e886cdc0f175c3e # Parent 1b26b83318b378a6e41916e230e27030ee4539e0 arm: SMP CPU shutdown For completeness, also implelent the CPU shutdown path. Signed-off-by: Tim Deegan <tim@xen.org> diff -r 1b26b83318b3 -r 80b1ce050d10 xen/arch/arm/domain.c --- a/xen/arch/arm/domain.c Fri Mar 09 13:39:06 2012 +0000 +++ b/xen/arch/arm/domain.c Fri Mar 09 13:39:06 2012 +0000 @@ -31,12 +31,10 @@ void idle_loop(void) { for ( ; ; ) { - /* TODO - if ( cpu_is_offline(smp_processor_id()) ) - play_dead(); - (*pm_idle)(); - BUG(); - */ + if ( cpu_is_offline(smp_processor_id()) ) + stop_cpu(); + + /* TODO: (*pm_idle)(); */ do_tasklet(); do_softirq(); } diff -r 1b26b83318b3 -r 80b1ce050d10 xen/arch/arm/gic.c --- a/xen/arch/arm/gic.c Fri Mar 09 13:39:06 2012 +0000 +++ b/xen/arch/arm/gic.c Fri Mar 09 13:39:06 2012 +0000 @@ -239,6 +239,11 @@ static void __cpuinit gic_cpu_init(void) GICC[GICC_CTLR] = GICC_CTL_ENABLE|GICC_CTL_EOI; /* Turn on delivery */ } +static void gic_cpu_disable(void) +{ + GICC[GICC_CTLR] = 0; +} + static void __cpuinit gic_hyp_init(void) { uint32_t vtr; @@ -250,6 +255,11 @@ static void __cpuinit gic_hyp_init(void) GICH[GICH_MISR] = GICH_MISR_EOI; } +static void __cpuinit gic_hyp_disable(void) +{ + GICH[GICH_HCR] = 0; +} + /* Set up the GIC */ int __init gic_init(void) { @@ -286,6 +296,15 @@ void __cpuinit gic_init_secondary_cpu(vo spin_unlock(&gic.lock); } +/* Shut down the per-CPU GIC interface */ +void gic_disable_cpu(void) +{ + spin_lock(&gic.lock); + gic_cpu_disable(); + gic_hyp_disable(); + spin_unlock(&gic.lock); +} + void gic_route_irqs(void) { /* XXX should get these from DT */ diff -r 1b26b83318b3 -r 80b1ce050d10 xen/arch/arm/gic.h --- a/xen/arch/arm/gic.h Fri Mar 09 13:39:06 2012 +0000 +++ b/xen/arch/arm/gic.h Fri Mar 09 13:39:06 2012 +0000 @@ -142,6 +142,8 @@ extern void gic_interrupt(struct cpu_use extern int gic_init(void); /* Bring up a secondary CPU''s per-CPU GIC interface */ extern void gic_init_secondary_cpu(void); +/* Take down a CPU''s per-CPU GIC interface */ +extern void gic_disable_cpu(void); /* setup the gic virtual interface for a guest */ extern void gicv_setup(struct domain *d); #endif diff -r 1b26b83318b3 -r 80b1ce050d10 xen/arch/arm/smpboot.c --- a/xen/arch/arm/smpboot.c Fri Mar 09 13:39:06 2012 +0000 +++ b/xen/arch/arm/smpboot.c Fri Mar 09 13:39:06 2012 +0000 @@ -18,6 +18,7 @@ #include <xen/cpu.h> #include <xen/cpumask.h> +#include <xen/delay.h> #include <xen/errno.h> #include <xen/init.h> #include <xen/mm.h> @@ -57,6 +58,7 @@ smp_prepare_cpus (unsigned int max_cpus) /* Shared state for coordinating CPU bringup */ unsigned long smp_up_cpu = 0; +static bool_t cpu_is_dead = 0; /* Boot the current CPU */ void __cpuinit start_secondary(unsigned long boot_phys_offset, @@ -97,8 +99,35 @@ void __cpuinit start_secondary(unsigned /* Shut down the current CPU */ void __cpu_disable(void) { - /* TODO: take down timers, GIC, &c. */ - BUG(); + unsigned int cpu = get_processor_id(); + + local_irq_disable(); + gic_disable_cpu(); + /* Allow any queued timer interrupts to get serviced */ + local_irq_enable(); + mdelay(1); + local_irq_disable(); + + /* It''s now safe to remove this processor from the online map */ + cpumask_clear_cpu(cpu, &cpu_online_map); + + if ( cpu_disable_scheduler(cpu) ) + BUG(); + mb(); + + /* Return to caller; eventually the IPI mechanism will unwind and the + * scheduler will drop to the idle loop, which will call stop_cpu(). 
*/ +} + +void stop_cpu(void) +{ + local_irq_disable(); + cpu_is_dead = 1; + /* Make sure the write happens before we sleep forever */ + dsb(); + isb(); + while ( 1 ) + asm volatile("wfi"); } /* Bring up a remote CPU */ @@ -124,8 +153,19 @@ int __cpu_up(unsigned int cpu) /* Wait for a remote CPU to die */ void __cpu_die(unsigned int cpu) { - /* TODO: interlock with __cpu_disable */ - BUG(); + unsigned int i = 0; + + while ( !cpu_is_dead ) + { + mdelay(100); + cpu_relax(); + process_pending_softirqs(); + if ( (++i % 10) == 0 ) + printk(KERN_ERR "CPU %u still not dead...\n", cpu); + mb(); + } + cpu_is_dead = 0; + mb(); } diff -r 1b26b83318b3 -r 80b1ce050d10 xen/include/asm-arm/smp.h --- a/xen/include/asm-arm/smp.h Fri Mar 09 13:39:06 2012 +0000 +++ b/xen/include/asm-arm/smp.h Fri Mar 09 13:39:06 2012 +0000 @@ -14,6 +14,8 @@ DECLARE_PER_CPU(cpumask_var_t, cpu_core_ #define raw_smp_processor_id() (get_processor_id()) +extern void stop_cpu(void); + #endif /* * Local variables:
# HG changeset patch # User Tim Deegan <tim@xen.org> # Date 1331300346 0 # Node ID 78eebd867ba25f5165283b45122d1006370ae71d # Parent 80b1ce050d104d9bb2ea41634e886cdc0f175c3e arm: Shutdown and reboot Reboot runes grabbed from linux''s SP810 reset function. Doesn''t seem to work on the model, though. Signed-off-by: Tim Deegan <tim@xen.org> Acked-by: Ian Campbell <ian.campbell@citrix.com> diff -r 80b1ce050d10 -r 78eebd867ba2 xen/arch/arm/shutdown.c --- a/xen/arch/arm/shutdown.c Fri Mar 09 13:39:06 2012 +0000 +++ b/xen/arch/arm/shutdown.c Fri Mar 09 13:39:06 2012 +0000 @@ -1,18 +1,64 @@ #include <xen/config.h> +#include <xen/console.h> +#include <xen/cpu.h> +#include <xen/delay.h> #include <xen/lib.h> +#include <xen/mm.h> +#include <xen/smp.h> + +static void raw_machine_reset(void) +{ + /* XXX get this from device tree */ +#ifdef SP810_ADDRESS + /* Use the SP810 system controller to force a reset */ + volatile uint32_t *sp810; + set_fixmap(FIXMAP_MISC, SP810_ADDRESS >> PAGE_SHIFT, DEV_SHARED); + sp810 = ((uint32_t *) + (FIXMAP_ADDR(FIXMAP_MISC) + (SP810_ADDRESS & ~PAGE_MASK))); + sp810[0] = 0x3; /* switch to slow mode */ + dsb(); isb(); + sp810[1] = 0x1; /* writing any value to SCSYSSTAT reg will reset system */ + dsb(); isb(); + clear_fixmap(FIXMAP_MISC); +#endif +} + +static void halt_this_cpu(void *arg) +{ + __cpu_disable(); + stop_cpu(); +} void machine_halt(void) { - /* TODO: halt */ - while(1) ; + watchdog_disable(); + console_start_sync(); + local_irq_enable(); + smp_call_function(halt_this_cpu, NULL, 0); + halt_this_cpu(NULL); } void machine_restart(unsigned int delay_millisecs) { - /* TODO: restart */ - printk("Cannot restart yet\n"); - while(1); + int timeout = 10; + + local_irq_enable(); + smp_call_function(halt_this_cpu, NULL, 0); + local_irq_disable(); + + mdelay(delay_millisecs); + + /* Wait at most another 10ms for all other CPUs to go offline. */ + while ( (num_online_cpus() > 1) && (timeout-- > 0) ) + mdelay(1); + + while ( 1 ) + { + raw_machine_reset(); + mdelay(100); + } } + /* * Local variables: * mode: C diff -r 80b1ce050d10 -r 78eebd867ba2 xen/include/asm-arm/config.h --- a/xen/include/asm-arm/config.h Fri Mar 09 13:39:06 2012 +0000 +++ b/xen/include/asm-arm/config.h Fri Mar 09 13:39:06 2012 +0000 @@ -119,6 +119,9 @@ extern unsigned long frametable_virt_end #define GIC_CR_OFFSET 0x2000 #define GIC_HR_OFFSET 0x4000 /* Guess work http://lists.infradead.org/pipermail/linux-arm-kernel/2011-September/064219.html */ #define GIC_VR_OFFSET 0x6000 /* Virtual Machine CPU interface) */ +/* Board-specific: base address of system controller */ +#define SP810_ADDRESS 0x1C020000 + #endif /* __ARM_CONFIG_H__ */ /*
Tim Deegan
2012-Mar-09 13:43 UTC
[PATCH 9 of 9] arm: new hook for late MMU setup on secondary CPUs
# HG changeset patch
# User Tim Deegan <tim@xen.org>
# Date 1331300395 0
# Node ID e18aa8c6c1f472bad15310a6e38457697d71540d
# Parent  78eebd867ba25f5165283b45122d1006370ae71d
arm: new hook for late MMU setup on secondary CPUs

The boot CPU turns on W^X in setup_pagetables().  Do the same for other
CPUs after they boot.  If we go to per-CPU pagetables, this is where
that will happen.

Signed-off-by: Tim Deegan <tim@xen.org>

diff -r 78eebd867ba2 -r e18aa8c6c1f4 xen/arch/arm/mm.c
--- a/xen/arch/arm/mm.c	Fri Mar 09 13:39:06 2012 +0000
+++ b/xen/arch/arm/mm.c	Fri Mar 09 13:39:55 2012 +0000
@@ -263,6 +263,13 @@ void __init setup_pagetables(unsigned lo
     WRITE_CP32(READ_CP32(HSCTLR) | SCTLR_WXN, HSCTLR);
 }
 
+/* MMU setup for secondary CPUs (which already have paging enabled) */
+void __cpuinit mmu_init_secondary_cpu(void)
+{
+    /* From now on, no mapping may be both writable and executable. */
+    WRITE_CP32(READ_CP32(HSCTLR) | SCTLR_WXN, HSCTLR);
+}
+
 /* Create Xen's mappings of memory.
  * Base and virt must be 32MB aligned and size a multiple of 32MB. */
 static void __init create_mappings(unsigned long virt,
diff -r 78eebd867ba2 -r e18aa8c6c1f4 xen/arch/arm/smpboot.c
--- a/xen/arch/arm/smpboot.c	Fri Mar 09 13:39:06 2012 +0000
+++ b/xen/arch/arm/smpboot.c	Fri Mar 09 13:39:55 2012 +0000
@@ -76,6 +76,7 @@ void __cpuinit start_secondary(unsigned
 
     dprintk(XENLOG_DEBUG, "CPU %li awake.\n", cpuid);
 
+    mmu_init_secondary_cpu();
     gic_init_secondary_cpu();
 
     set_current(idle_vcpu[cpuid]);
diff -r 78eebd867ba2 -r e18aa8c6c1f4 xen/include/asm-arm/mm.h
--- a/xen/include/asm-arm/mm.h	Fri Mar 09 13:39:06 2012 +0000
+++ b/xen/include/asm-arm/mm.h	Fri Mar 09 13:39:55 2012 +0000
@@ -136,6 +136,8 @@ extern unsigned long total_pages;
 
 /* Boot-time pagetable setup */
 extern void setup_pagetables(unsigned long boot_phys_offset);
+/* MMU setup for secondary CPUs (which already have paging enabled) */
+extern void __cpuinit mmu_init_secondary_cpu(void);
 /* Set up the xenheap: up to 1GB of contiguous, always-mapped memory.
  * Base must be 32MB aligned and size a multiple of 32MB. */
 extern void setup_xenheap_mappings(unsigned long base_mfn, unsigned long nr_mfns);
Hold off on this - it seems to mess up context switches (something to
do with current being a per-cpu variable and per-cpu variables being
accessed via the current vcpu's stack...)

Tim.

At 13:43 +0000 on 09 Mar (1331300613), Tim Deegan wrote:
> [the "arm: per-cpu areas" patch, quoted in full -- see patch 4 of 9 above]