The following series makes host SMP work for both arm32 and arm64. Tested in both cases with the models and 2 pCPUs. Ian.
Ian Campbell
2013-Mar-06 08:54 UTC
[PATCH 1/9] arm: initialise VCPU SCTLR in vcpu_initialise
From: Ian Campbell <ian.campbell@citrix.com> Ensuring a sane initial starting state for vcpus other than domain 0s. Signed-off-by: Ian Campbell <ian.campbell@citrix.com> --- xen/arch/arm/domain.c | 2 ++ xen/arch/arm/domain_build.c | 2 -- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/xen/arch/arm/domain.c b/xen/arch/arm/domain.c index 9be83a6..f369871 100644 --- a/xen/arch/arm/domain.c +++ b/xen/arch/arm/domain.c @@ -435,6 +435,8 @@ int vcpu_initialise(struct vcpu *v) if ( is_idle_vcpu(v) ) return rc; + v->arch.sctlr = SCTLR_BASE; + if ( (rc = vcpu_vgic_init(v)) != 0 ) return rc; diff --git a/xen/arch/arm/domain_build.c b/xen/arch/arm/domain_build.c index 07303fe..bdf41fa 100644 --- a/xen/arch/arm/domain_build.c +++ b/xen/arch/arm/domain_build.c @@ -373,8 +373,6 @@ int construct_dom0(struct domain *d) } #endif - v->arch.sctlr = SCTLR_BASE; - WRITE_SYSREG(HCR_PTW|HCR_BSU_OUTER|HCR_AMO|HCR_IMO|HCR_VM|HCR_TWI, HCR_EL2); isb(); -- 1.7.10.4
Ian Campbell
2013-Mar-06 08:54 UTC
[PATCH 2/9] arm: consolidate setup of hypervisor traps and second stage paging
From: Ian Campbell <ian.campbell@citrix.com> In particular be sure to initisalise HCR_EL2 on secondary processors. Signed-off-by: Ian Campbell <ian.campbell@citrix.com> --- xen/arch/arm/domain_build.c | 7 ------- xen/arch/arm/mm.c | 10 ++++++++++ xen/arch/arm/setup.c | 13 +++---------- xen/arch/arm/smpboot.c | 5 +++-- xen/arch/arm/traps.c | 11 +++++++++++ xen/include/asm-arm/mm.h | 4 +++- xen/include/asm-arm/processor.h | 2 ++ 7 files changed, 32 insertions(+), 20 deletions(-) diff --git a/xen/arch/arm/domain_build.c b/xen/arch/arm/domain_build.c index bdf41fa..f161845 100644 --- a/xen/arch/arm/domain_build.c +++ b/xen/arch/arm/domain_build.c @@ -322,10 +322,6 @@ int construct_dom0(struct domain *d) gic_route_irq_to_guest(d, 46, "lcd"); gic_route_irq_to_guest(d, 47, "eth"); - /* Enable second stage translation */ - WRITE_SYSREG(READ_SYSREG(HCR_EL2) | HCR_VM, HCR_EL2); - isb(); - /* The following loads use the domain''s p2m */ p2m_load_VTTBR(d); @@ -373,9 +369,6 @@ int construct_dom0(struct domain *d) } #endif - WRITE_SYSREG(HCR_PTW|HCR_BSU_OUTER|HCR_AMO|HCR_IMO|HCR_VM|HCR_TWI, HCR_EL2); - isb(); - local_abort_enable(); return 0; diff --git a/xen/arch/arm/mm.c b/xen/arch/arm/mm.c index 9661f10..ba3140d 100644 --- a/xen/arch/arm/mm.c +++ b/xen/arch/arm/mm.c @@ -251,6 +251,16 @@ void __init arch_init_memory(void) BUG_ON(IS_ERR(dom_cow)); } +void __cpuinit setup_virt_paging(void) +{ + /* Setup Stage 2 address translation */ + /* SH0=00, ORGN0=IRGN0=01 + * SL0=01 (Level-1) + * T0SZ=(1)1000 = -8 (40 bit physical addresses) + */ + WRITE_SYSREG32(0x80002558, VTCR_EL2); isb(); +} + /* Boot-time pagetable setup. 
* Changes here may need matching changes in head.S */ void __init setup_pagetables(unsigned long boot_phys_offset, paddr_t xen_paddr) diff --git a/xen/arch/arm/setup.c b/xen/arch/arm/setup.c index 566f36c..cfe3d94 100644 --- a/xen/arch/arm/setup.c +++ b/xen/arch/arm/setup.c @@ -446,16 +446,9 @@ void __init start_xen(unsigned long boot_phys_offset, set_current((struct vcpu *)0xfffff000); /* debug sanity */ idle_vcpu[0] = current; - /* Setup Hyp vector base */ - WRITE_SYSREG((vaddr_t)hyp_traps_vector, VBAR_EL2); - isb(); - - /* Setup Stage 2 address translation */ - /* SH0=00, ORGN0=IRGN0=01 - * SL0=01 (Level-1) - * T0SZ=(1)1000 = -8 (40 bit physical addresses) - */ - WRITE_SYSREG32(0x80002558, VTCR_EL2); isb(); + init_traps(); + + setup_virt_paging(); enable_vfp(); diff --git a/xen/arch/arm/smpboot.c b/xen/arch/arm/smpboot.c index b2af42e..1bebf86 100644 --- a/xen/arch/arm/smpboot.c +++ b/xen/arch/arm/smpboot.c @@ -148,8 +148,9 @@ void __cpuinit start_secondary(unsigned long boot_phys_offset, *c = boot_cpu_data; identify_cpu(c); - /* Setup Hyp vector base */ - WRITE_SYSREG((vaddr_t)hyp_traps_vector, VBAR_EL2); + init_traps(); + + setup_virt_paging(); mmu_init_secondary_cpu(); enable_vfp(); diff --git a/xen/arch/arm/traps.c b/xen/arch/arm/traps.c index cf28974..2cc31ab 100644 --- a/xen/arch/arm/traps.c +++ b/xen/arch/arm/traps.c @@ -53,6 +53,17 @@ integer_param("debug_stack_lines", debug_stack_lines); #define stack_words_per_line 8 + +void __cpuinit init_traps(void) +{ + /* Setup Hyp vector base */ + WRITE_SYSREG((vaddr_t)hyp_traps_vector, VBAR_EL2); + + /* Setup hypervisor traps */ + WRITE_SYSREG(HCR_PTW|HCR_BSU_OUTER|HCR_AMO|HCR_IMO|HCR_VM|HCR_TWI, HCR_EL2); + isb(); +} + asmlinkage void __div0(void) { printk("Division by zero in hypervisor.\n"); diff --git a/xen/include/asm-arm/mm.h b/xen/include/asm-arm/mm.h index 19e5bc2..4be383e 100644 --- a/xen/include/asm-arm/mm.h +++ b/xen/include/asm-arm/mm.h @@ -138,8 +138,10 @@ extern unsigned long total_pages; /* 
Boot-time pagetable setup */ extern void setup_pagetables(unsigned long boot_phys_offset, paddr_t xen_paddr); -/* MMU setup for seccondary CPUS (which already have paging enabled) */ +/* MMU setup for secondary CPUS (which already have paging enabled) */ extern void __cpuinit mmu_init_secondary_cpu(void); +/* Second stage paging setup, to be called on all CPUs */ +extern void __cpuinit setup_virt_paging(void); /* Set up the xenheap: up to 1GB of contiguous, always-mapped memory. * Base must be 32MB aligned and size a multiple of 32MB. */ extern void setup_xenheap_mappings(unsigned long base_mfn, unsigned long nr_mfns); diff --git a/xen/include/asm-arm/processor.h b/xen/include/asm-arm/processor.h index 6fbe2fa..1681ebf 100644 --- a/xen/include/asm-arm/processor.h +++ b/xen/include/asm-arm/processor.h @@ -353,6 +353,8 @@ union hsr { #ifndef __ASSEMBLY__ extern uint32_t hyp_traps_vector[]; +void init_traps(void); + void panic_PAR(uint64_t par); void show_execution_state(struct cpu_user_regs *regs); -- 1.7.10.4
Ian Campbell
2013-Mar-06 08:54 UTC
[PATCH 3/9] arm: gic: implement IPIs using SGI mechanism
From: Ian Campbell <ian.campbell@citrix.com> Signed-off-by: Ian Campbell <ian.campbell@citrix.com> --- xen/arch/arm/arm32/mode_switch.S | 2 +- xen/arch/arm/gic.c | 88 +++++++++++++++++++++++++++++++++++--- xen/arch/arm/smp.c | 14 +++--- xen/include/asm-arm/gic.h | 22 +++++++++- 4 files changed, 108 insertions(+), 18 deletions(-) diff --git a/xen/arch/arm/arm32/mode_switch.S b/xen/arch/arm/arm32/mode_switch.S index bc2be74..d6741d0 100644 --- a/xen/arch/arm/arm32/mode_switch.S +++ b/xen/arch/arm/arm32/mode_switch.S @@ -43,7 +43,7 @@ kick_cpus: mov r2, #0x1 str r2, [r0, #(GICD_CTLR * 4)] /* enable distributor */ mov r2, #0xfe0000 - str r2, [r0, #(GICD_SGIR * 4)] /* send IPI to everybody */ + str r2, [r0, #(GICD_SGIR * 4)] /* send IPI to everybody, SGI0 = Event check */ dsb str r2, [r0, #(GICD_CTLR * 4)] /* disable distributor */ mov pc, lr diff --git a/xen/arch/arm/gic.c b/xen/arch/arm/gic.c index bbb8e04..6592562 100644 --- a/xen/arch/arm/gic.c +++ b/xen/arch/arm/gic.c @@ -354,10 +354,55 @@ void __init gic_init(void) spin_unlock(&gic.lock); } +void send_SGI_mask(const cpumask_t *cpumask, enum gic_sgi sgi) +{ + unsigned long mask = cpumask_bits(cpumask)[0]; + + ASSERT(sgi < 16); /* There are only 16 SGIs */ + + mask &= cpumask_bits(&cpu_online_map)[0]; + + ASSERT(mask < 0x100); /* The target bitmap only supports 8 CPUs */ + + dsb(); + + GICD[GICD_SGIR] = GICD_SGI_TARGET_LIST + | (mask<<GICD_SGI_TARGET_SHIFT) + | GICD_SGI_GROUP1 + | sgi; +} + +void send_SGI_one(unsigned int cpu, enum gic_sgi sgi) +{ + ASSERT(cpu < 7); /* Targets bitmap only supports 8 CPUs */ + send_SGI_mask(cpumask_of(cpu), sgi); +} + +void send_SGI_self(enum gic_sgi sgi) +{ + ASSERT(sgi < 16); /* There are only 16 SGIs */ + + dsb(); + + GICD[GICD_SGIR] = GICD_SGI_TARGET_SELF + | GICD_SGI_GROUP1 + | sgi; +} + +void send_SGI_allbutself(enum gic_sgi sgi) +{ + ASSERT(sgi < 16); /* There are only 16 SGIs */ + + dsb(); + + GICD[GICD_SGIR] = GICD_SGI_TARGET_OTHERS + | GICD_SGI_GROUP1 + | sgi; +} + void 
smp_send_state_dump(unsigned int cpu) { - printk("WARNING: unable to send state dump request to CPU%d\n", cpu); - /* XXX TODO -- send an SGI */ + send_SGI_one(cpu, GIC_SGI_DUMP_STATE); } /* Set up the per-CPU parts of the GIC for a secondary CPU */ @@ -585,6 +630,28 @@ out: return retval; } +static void do_sgi(struct cpu_user_regs *regs, int othercpu, enum gic_sgi sgi) +{ + /* Lower the priority */ + GICC[GICC_EOIR] = sgi; + + switch (sgi) + { + case GIC_SGI_EVENT_CHECK: + /* Nothing to do, will check for events on return path */ + break; + case GIC_SGI_DUMP_STATE: + dump_execstate(regs); + break; + default: + panic("Unhandled SGI %d on CPU%d\n", sgi, smp_processor_id()); + break; + } + + /* Deactivate */ + GICC[GICC_DIR] = sgi; +} + /* Accept an interrupt from the GIC and dispatch its handler */ void gic_interrupt(struct cpu_user_regs *regs, int is_fiq) { @@ -595,14 +662,23 @@ void gic_interrupt(struct cpu_user_regs *regs, int is_fiq) do { intack = GICC[GICC_IAR]; irq = intack & GICC_IA_IRQ; - local_irq_enable(); - if (likely(irq < 1021)) + if ( likely(irq >= 16 && irq < 1021) ) + { + local_irq_enable(); do_IRQ(regs, irq, is_fiq); + local_irq_disable(); + } + else if (unlikely(irq < 16)) + { + unsigned int cpu = (intack & GICC_IA_CPU_MASK) >> GICC_IA_CPU_SHIFT; + do_sgi(regs, cpu, irq); + } else + { + local_irq_disable(); break; - - local_irq_disable(); + } } while (1); } diff --git a/xen/arch/arm/smp.c b/xen/arch/arm/smp.c index 12260f4..a902d84 100644 --- a/xen/arch/arm/smp.c +++ b/xen/arch/arm/smp.c @@ -3,10 +3,11 @@ #include <asm/smp.h> #include <asm/cpregs.h> #include <asm/page.h> +#include <asm/gic.h> void flush_tlb_mask(const cpumask_t *mask) { - /* XXX IPI other processors */ + /* No need to IPI other processors on ARM, the processor takes care of it. */ flush_xen_data_tlb(); } @@ -15,17 +16,12 @@ void smp_call_function( void *info, int wait) { - /* TODO: No SMP just now, does not include self so nothing to do. 
- cpumask_t allbutself = cpu_online_map; - cpu_clear(smp_processor_id(), allbutself); - on_selected_cpus(&allbutself, func, info, wait); - */ + panic("%s not implmented\n", __func__); } + void smp_send_event_check_mask(const cpumask_t *mask) { - /* TODO: No SMP just now, does not include self so nothing to do. - send_IPI_mask(mask, EVENT_CHECK_VECTOR); - */ + send_SGI_mask(mask, GIC_SGI_EVENT_CHECK); } /* diff --git a/xen/include/asm-arm/gic.h b/xen/include/asm-arm/gic.h index 6bf50bb..24c0d5c 100644 --- a/xen/include/asm-arm/gic.h +++ b/xen/include/asm-arm/gic.h @@ -51,6 +51,13 @@ #define GICD_SPENDSGIRN (0xF2C/4) #define GICD_ICPIDR2 (0xFE8/4) +#define GICD_SGI_TARGET_LIST (0UL<<24) +#define GICD_SGI_TARGET_OTHERS (1UL<<24) +#define GICD_SGI_TARGET_SELF (2UL<<24) +#define GICD_SGI_TARGET_SHIFT (16) +#define GICD_SGI_TARGET_MASK (0xFFUL<<GICD_SGI_TARGET_SHIFT) +#define GICD_SGI_GROUP1 (1UL<<15) + #define GICC_CTLR (0x0000/4) #define GICC_PMR (0x0004/4) #define GICC_BPR (0x0008/4) @@ -83,8 +90,9 @@ #define GICC_CTL_ENABLE 0x1 #define GICC_CTL_EOI (0x1 << 9) -#define GICC_IA_IRQ 0x03ff -#define GICC_IA_CPU 0x1c00 +#define GICC_IA_IRQ 0x03ff +#define GICC_IA_CPU_MASK 0x1c00 +#define GICC_IA_CPU_SHIFT 10 #define GICH_HCR_EN (1 << 0) #define GICH_HCR_UIE (1 << 1) @@ -157,6 +165,16 @@ extern int gicv_setup(struct domain *d); extern void gic_save_state(struct vcpu *v); extern void gic_restore_state(struct vcpu *v); +/* SGI (AKA IPIs) */ +enum gic_sgi { + GIC_SGI_EVENT_CHECK = 0, + GIC_SGI_DUMP_STATE = 1, +}; +extern void send_SGI_mask(const cpumask_t *cpumask, enum gic_sgi sgi); +extern void send_SGI_one(unsigned int cpu, enum gic_sgi sgi); +extern void send_SGI_self(enum gic_sgi sgi); +extern void send_SGI_allbutself(enum gic_sgi sgi); + /* print useful debug info */ extern void gic_dump_info(struct vcpu *v); -- 1.7.10.4
From: Ian Campbell <ian.campbell@citrix.com> The current cpuid is held in x22 not x12. Signed-off-by: Ian Campbell <ian.campbell@citrix.com> --- xen/arch/arm/arm64/head.S | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/xen/arch/arm/arm64/head.S b/xen/arch/arm/arm64/head.S index b7ab251..e5c00be 100644 --- a/xen/arch/arm/arm64/head.S +++ b/xen/arch/arm/arm64/head.S @@ -307,7 +307,7 @@ paging: dsb sy ldr x0, =smp_up_cpu ldr x1, [x0] /* Which CPU is being booted? */ - cmp x1, x12 /* Is it us? */ + cmp x1, x22 /* Is it us? */ b.ne 1b launch: -- 1.7.10.4
From: Ian Campbell <ian.campbell@citrix.com> At least we were consistent, but lets be correct too Signed-off-by: Ian Campbell <ian.campbell@citrix.com> --- xen/arch/arm/vgic.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/xen/arch/arm/vgic.c b/xen/arch/arm/vgic.c index 8efcefc..dbfcd04 100644 --- a/xen/arch/arm/vgic.c +++ b/xen/arch/arm/vgic.c @@ -286,7 +286,7 @@ static int vgic_distr_mmio_read(struct vcpu *v, mmio_info_t *info) return 1; case GICD_NSACR ... GICD_NSACRN: - /* We do not implement securty extensions for guests, read zero */ + /* We do not implement security extensions for guests, read zero */ goto read_as_zero; case GICD_SGIR: @@ -396,7 +396,7 @@ static int vgic_distr_mmio_write(struct vcpu *v, mmio_info_t *info) goto write_ignore; case GICD_IGROUPR ... GICD_IGROUPRN: - /* We do not implement securty extensions for guests, write ignore */ + /* We do not implement security extensions for guests, write ignore */ goto write_ignore; case GICD_ISENABLER ... GICD_ISENABLERN: @@ -494,7 +494,7 @@ static int vgic_distr_mmio_write(struct vcpu *v, mmio_info_t *info) return 1; case GICD_NSACR ... GICD_NSACRN: - /* We do not implement securty extensions for guests, write ignore */ + /* We do not implement security extensions for guests, write ignore */ goto write_ignore; case GICD_SGIR: -- 1.7.10.4
Ian Campbell
2013-Mar-06 08:54 UTC
[PATCH 6/9] arm: vgic: fix race in vgic_vcpu_inject_irq
From: Ian Campbell <ian.campbell@citrix.com> The initial check for a still pending interrupt (!list_empty(&n->inflight)) needs to be covered by the vgic lock to avoid trying to insert the IRQ into the inflight list simultaneously on 2 pCPUS. Expand the area covered by the lock appropriately. Also consolidate the unlocks on the exit path into one location. Signed-off-by: Ian Campbell <ian.campbell@citrix.com> --- xen/arch/arm/vgic.c | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/xen/arch/arm/vgic.c b/xen/arch/arm/vgic.c index dbfcd04..b30da78 100644 --- a/xen/arch/arm/vgic.c +++ b/xen/arch/arm/vgic.c @@ -584,9 +584,14 @@ void vgic_vcpu_inject_irq(struct vcpu *v, unsigned int irq, int virtual) struct pending_irq *iter, *n = irq_to_pending(v, irq); unsigned long flags; - /* irq still pending */ + spin_lock_irqsave(&v->arch.vgic.lock, flags); + + /* irq already pending */ if (!list_empty(&n->inflight)) + { + spin_unlock_irqrestore(&v->arch.vgic.lock, flags); return; + } priority = byte_read(rank->ipriority[REG_RANK_INDEX(8, idx)], 0, byte); @@ -601,20 +606,18 @@ void vgic_vcpu_inject_irq(struct vcpu *v, unsigned int irq, int virtual) if ( rank->ienable & (1 << (irq % 32)) ) gic_set_guest_irq(v, irq, GICH_LR_PENDING, priority); - spin_lock_irqsave(&v->arch.vgic.lock, flags); list_for_each_entry ( iter, &v->arch.vgic.inflight_irqs, inflight ) { if ( iter->priority > priority ) { list_add_tail(&n->inflight, &iter->inflight); - spin_unlock_irqrestore(&v->arch.vgic.lock, flags); goto out; } } list_add_tail(&n->inflight, &v->arch.vgic.inflight_irqs); +out: spin_unlock_irqrestore(&v->arch.vgic.lock, flags); /* we have a new higher priority irq, inject it into the guest */ -out: vcpu_unblock(v); } -- 1.7.10.4
Ian Campbell
2013-Mar-06 08:54 UTC
[PATCH 7/9] arm: vgic: fix race between evtchn upcall and evtchnop_send
From: Ian Campbell <ian.campbell@citrix.com> On ARM the evtchn upcall is done by using a local PPI interrupt. However the guest will clear the evtchn_upcall_pending bit before it EOIs that PPI (which happens late). This means vgic_vcpu_inject_irq (called via vcpu_mark_events_pending) sees the PPI as in flight and ends up not reinjecting it, if this happens after the guest has finished its event channel processing loop but before the EOI then we have lost the upcall. We therefore also need to call gic_restore_pending_irqs on the exit to guest path in order to pick up any newly injected IRQ and propagate it into a free LR. This doesn't currently support bumping a lower priority interrupt out of the LRs in order to inject a new higher priority interrupt. We don't yet implement interrupt prioritisation (and guests don't use it either) so this will do for now. Since gic_restore_pending_irqs is now called in the return to guest path it is called with interrupts disabled and accordingly must use the irqsave/irqrestore spinlock primitives. 
Signed-off-by: Ian Campbell <ian.campbell@citrix.com> --- xen/arch/arm/gic.c | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/xen/arch/arm/gic.c b/xen/arch/arm/gic.c index 6592562..59e007a 100644 --- a/xen/arch/arm/gic.c +++ b/xen/arch/arm/gic.c @@ -556,17 +556,18 @@ static void gic_restore_pending_irqs(struct vcpu *v) { int i; struct pending_irq *p, *t; + unsigned long flags; list_for_each_entry_safe ( p, t, &v->arch.vgic.lr_pending, lr_queue ) { i = find_first_zero_bit(&this_cpu(lr_mask), nr_lrs); if ( i >= nr_lrs ) return; - spin_lock_irq(&gic.lock); + spin_lock_irqsave(&gic.lock, flags); gic_set_lr(i, p->irq, GICH_LR_PENDING, p->priority); list_del_init(&p->lr_queue); set_bit(i, &this_cpu(lr_mask)); - spin_unlock_irq(&gic.lock); + spin_unlock_irqrestore(&gic.lock, flags); } } @@ -589,6 +590,10 @@ static void gic_inject_irq_stop(void) void gic_inject(void) { + if ( vcpu_info(current, evtchn_upcall_pending) ) + vgic_vcpu_inject_irq(current, VGIC_IRQ_EVTCHN_CALLBACK, 1); + + gic_restore_pending_irqs(current); if (!this_cpu(lr_mask)) gic_inject_irq_stop(); else -- 1.7.10.4
From: Ian Campbell <ian.campbell@citrix.com> lr_mask is a uint64_t and so needs to be printed with PRIx64. Signed-off-by: Ian Campbell <ian.campbell@citrix.com> --- xen/arch/arm/gic.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/xen/arch/arm/gic.c b/xen/arch/arm/gic.c index 59e007a..2b60532 100644 --- a/xen/arch/arm/gic.c +++ b/xen/arch/arm/gic.c @@ -741,7 +741,7 @@ void gic_dump_info(struct vcpu *v) int i; struct pending_irq *p; - printk("GICH_LRs (vcpu %d) mask=%llx\n", v->vcpu_id, v->arch.lr_mask); + printk("GICH_LRs (vcpu %d) mask=%"PRIx64"\n", v->vcpu_id, v->arch.lr_mask); if ( v == current ) { for ( i = 0; i < nr_lrs; i++ ) -- 1.7.10.4
From: Ian Campbell <ian.campbell@citrix.com> Make the "CPU<n> booted" message fit in with the surrounding logging. Log which CPU is taking an unexpected trap. Signed-off-by: Ian Campbell <ian.campbell@citrix.com> --- xen/arch/arm/smpboot.c | 2 +- xen/arch/arm/traps.c | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/xen/arch/arm/smpboot.c b/xen/arch/arm/smpboot.c index 1bebf86..bd353c8 100644 --- a/xen/arch/arm/smpboot.c +++ b/xen/arch/arm/smpboot.c @@ -178,7 +178,7 @@ void __cpuinit start_secondary(unsigned long boot_phys_offset, local_irq_enable(); - dprintk(XENLOG_DEBUG, "CPU %u booted.\n", smp_processor_id()); + printk(XENLOG_DEBUG "CPU %u booted.\n", smp_processor_id()); startup_cpu_idle_loop(); } diff --git a/xen/arch/arm/traps.c b/xen/arch/arm/traps.c index 2cc31ab..a98a45e 100644 --- a/xen/arch/arm/traps.c +++ b/xen/arch/arm/traps.c @@ -578,7 +578,7 @@ void vcpu_show_execution_state(struct vcpu *v) void do_unexpected_trap(const char *msg, struct cpu_user_regs *regs) { - printk("Unexpected Trap: %s\n", msg); + printk("CPU%d: Unexpected Trap: %s\n", smp_processor_id(), msg); show_execution_state(regs); while(1); } -- 1.7.10.4
Stefano Stabellini
2013-Mar-19 15:58 UTC
Re: [PATCH 1/9] arm: initialise VCPU SCTLR in vcpu_initialise
On Wed, 6 Mar 2013, Ian Campbell wrote:> From: Ian Campbell <ian.campbell@citrix.com> > > Ensuring a sane initial starting state for vcpus other than domain 0s. > > Signed-off-by: Ian Campbell <ian.campbell@citrix.com>Acked-by: Stefano Stabellini <stefano.stabellini@eu.citrix.com>> xen/arch/arm/domain.c | 2 ++ > xen/arch/arm/domain_build.c | 2 -- > 2 files changed, 2 insertions(+), 2 deletions(-) > > diff --git a/xen/arch/arm/domain.c b/xen/arch/arm/domain.c > index 9be83a6..f369871 100644 > --- a/xen/arch/arm/domain.c > +++ b/xen/arch/arm/domain.c > @@ -435,6 +435,8 @@ int vcpu_initialise(struct vcpu *v) > if ( is_idle_vcpu(v) ) > return rc; > > + v->arch.sctlr = SCTLR_BASE; > + > if ( (rc = vcpu_vgic_init(v)) != 0 ) > return rc; > > diff --git a/xen/arch/arm/domain_build.c b/xen/arch/arm/domain_build.c > index 07303fe..bdf41fa 100644 > --- a/xen/arch/arm/domain_build.c > +++ b/xen/arch/arm/domain_build.c > @@ -373,8 +373,6 @@ int construct_dom0(struct domain *d) > } > #endif > > - v->arch.sctlr = SCTLR_BASE; > - > WRITE_SYSREG(HCR_PTW|HCR_BSU_OUTER|HCR_AMO|HCR_IMO|HCR_VM|HCR_TWI, HCR_EL2); > isb(); > > -- > 1.7.10.4 >
On Wed, 6 Mar 2013, Ian Campbell wrote:> From: Ian Campbell <ian.campbell@citrix.com> > > lr_mask is a uint64_t and so needs to be printed with PRIx64. > > Signed-off-by: Ian Campbell <ian.campbell@citrix.com>Acked-by: Stefano Stabellini <stefano.stabellini@eu.citrix.com>> xen/arch/arm/gic.c | 2 +- > 1 file changed, 1 insertion(+), 1 deletion(-) > > diff --git a/xen/arch/arm/gic.c b/xen/arch/arm/gic.c > index 59e007a..2b60532 100644 > --- a/xen/arch/arm/gic.c > +++ b/xen/arch/arm/gic.c > @@ -741,7 +741,7 @@ void gic_dump_info(struct vcpu *v) > int i; > struct pending_irq *p; > > - printk("GICH_LRs (vcpu %d) mask=%llx\n", v->vcpu_id, v->arch.lr_mask); > + printk("GICH_LRs (vcpu %d) mask=%"PRIx64"\n", v->vcpu_id, v->arch.lr_mask); > if ( v == current ) > { > for ( i = 0; i < nr_lrs; i++ ) > -- > 1.7.10.4 >
Stefano Stabellini
2013-Mar-19 16:05 UTC
Re: [PATCH 9/9] arm: tweak/improve logging for host SMP
On Wed, 6 Mar 2013, Ian Campbell wrote:> From: Ian Campbell <ian.campbell@citrix.com> > > Make the "CPU<n> booted" message fit in with the surrounding logging. > > Log which CPU is taking an unexpected trap. > > Signed-off-by: Ian Campbell <ian.campbell@citrix.com>Acked-by: Stefano Stabellini <stefano.stabellini@eu.citrix.com>> xen/arch/arm/smpboot.c | 2 +- > xen/arch/arm/traps.c | 2 +- > 2 files changed, 2 insertions(+), 2 deletions(-) > > diff --git a/xen/arch/arm/smpboot.c b/xen/arch/arm/smpboot.c > index 1bebf86..bd353c8 100644 > --- a/xen/arch/arm/smpboot.c > +++ b/xen/arch/arm/smpboot.c > @@ -178,7 +178,7 @@ void __cpuinit start_secondary(unsigned long boot_phys_offset, > > local_irq_enable(); > > - dprintk(XENLOG_DEBUG, "CPU %u booted.\n", smp_processor_id()); > + printk(XENLOG_DEBUG "CPU %u booted.\n", smp_processor_id()); > > startup_cpu_idle_loop(); > } > diff --git a/xen/arch/arm/traps.c b/xen/arch/arm/traps.c > index 2cc31ab..a98a45e 100644 > --- a/xen/arch/arm/traps.c > +++ b/xen/arch/arm/traps.c > @@ -578,7 +578,7 @@ void vcpu_show_execution_state(struct vcpu *v) > > void do_unexpected_trap(const char *msg, struct cpu_user_regs *regs) > { > - printk("Unexpected Trap: %s\n", msg); > + printk("CPU%d: Unexpected Trap: %s\n", smp_processor_id(), msg); > show_execution_state(regs); > while(1); > } > -- > 1.7.10.4 >
Stefano Stabellini
2013-Mar-19 16:06 UTC
Re: [PATCH 5/9] arm: vgic: typo s/securty/security/
On Wed, 6 Mar 2013, Ian Campbell wrote:> From: Ian Campbell <ian.campbell@citrix.com> > > At least we were consistent, but lets be correct too > > Signed-off-by: Ian Campbell <ian.campbell@citrix.com>Acked-by: Stefano Stabellini <stefano.stabellini@eu.citrix.com>> xen/arch/arm/vgic.c | 6 +++--- > 1 file changed, 3 insertions(+), 3 deletions(-) > > diff --git a/xen/arch/arm/vgic.c b/xen/arch/arm/vgic.c > index 8efcefc..dbfcd04 100644 > --- a/xen/arch/arm/vgic.c > +++ b/xen/arch/arm/vgic.c > @@ -286,7 +286,7 @@ static int vgic_distr_mmio_read(struct vcpu *v, mmio_info_t *info) > return 1; > > case GICD_NSACR ... GICD_NSACRN: > - /* We do not implement securty extensions for guests, read zero */ > + /* We do not implement security extensions for guests, read zero */ > goto read_as_zero; > > case GICD_SGIR: > @@ -396,7 +396,7 @@ static int vgic_distr_mmio_write(struct vcpu *v, mmio_info_t *info) > goto write_ignore; > > case GICD_IGROUPR ... GICD_IGROUPRN: > - /* We do not implement securty extensions for guests, write ignore */ > + /* We do not implement security extensions for guests, write ignore */ > goto write_ignore; > > case GICD_ISENABLER ... GICD_ISENABLERN: > @@ -494,7 +494,7 @@ static int vgic_distr_mmio_write(struct vcpu *v, mmio_info_t *info) > return 1; > > case GICD_NSACR ... GICD_NSACRN: > - /* We do not implement securty extensions for guests, write ignore */ > + /* We do not implement security extensions for guests, write ignore */ > goto write_ignore; > > case GICD_SGIR: > -- > 1.7.10.4 >
Stefano Stabellini
2013-Mar-19 16:10 UTC
Re: [PATCH 6/9] arm: vgic: fix race in vgic_vcpu_inject_irq
On Wed, 6 Mar 2013, Ian Campbell wrote:> From: Ian Campbell <ian.campbell@citrix.com> > > The initial check for a still pending interrupt (!list_empty(&n->inflight)) > needs to be covered by the vgic lock to avoid trying to insert the IRQ into the > inflight list simultaneously on 2 pCPUS. Expand the area covered by the lock > appropriately. > > Also consolidate the unlocks on the exit path into one location. > > Signed-off-by: Ian Campbell <ian.campbell@citrix.com>Acked-by: Stefano Stabellini <stefano.stabellini@eu.citrix.com>> xen/arch/arm/vgic.c | 11 +++++++---- > 1 file changed, 7 insertions(+), 4 deletions(-) > > diff --git a/xen/arch/arm/vgic.c b/xen/arch/arm/vgic.c > index dbfcd04..b30da78 100644 > --- a/xen/arch/arm/vgic.c > +++ b/xen/arch/arm/vgic.c > @@ -584,9 +584,14 @@ void vgic_vcpu_inject_irq(struct vcpu *v, unsigned int irq, int virtual) > struct pending_irq *iter, *n = irq_to_pending(v, irq); > unsigned long flags; > > - /* irq still pending */ > + spin_lock_irqsave(&v->arch.vgic.lock, flags); > + > + /* irq already pending */ > if (!list_empty(&n->inflight)) > + { > + spin_unlock_irqrestore(&v->arch.vgic.lock, flags); > return; > + } > > priority = byte_read(rank->ipriority[REG_RANK_INDEX(8, idx)], 0, byte); > > @@ -601,20 +606,18 @@ void vgic_vcpu_inject_irq(struct vcpu *v, unsigned int irq, int virtual) > if ( rank->ienable & (1 << (irq % 32)) ) > gic_set_guest_irq(v, irq, GICH_LR_PENDING, priority); > > - spin_lock_irqsave(&v->arch.vgic.lock, flags); > list_for_each_entry ( iter, &v->arch.vgic.inflight_irqs, inflight ) > { > if ( iter->priority > priority ) > { > list_add_tail(&n->inflight, &iter->inflight); > - spin_unlock_irqrestore(&v->arch.vgic.lock, flags); > goto out; > } > } > list_add_tail(&n->inflight, &v->arch.vgic.inflight_irqs); > +out: > spin_unlock_irqrestore(&v->arch.vgic.lock, flags); > /* we have a new higher priority irq, inject it into the guest */ > -out: > vcpu_unblock(v); > } > > -- > 1.7.10.4 >
Stefano Stabellini
2013-Mar-19 16:18 UTC
Re: [PATCH 7/9] arm: vgic: fix race between evtchn upcall and evtchnop_send
On Wed, 6 Mar 2013, Ian Campbell wrote:> From: Ian Campbell <ian.campbell@citrix.com> > > On ARM the evtchn upcall is done by using a local PPI interrupt. However the > guest will clear the evtchn_upcall_pending bit before it EOIs that PPI (which > happens late). This means vgic_vcpu_inject_irq (called via > vcpu_mark_events_pending) sees the PPI as in flight and ends up not reinjecting > it, if this happens after the guest has finished its event channel processing > loop but before the EOI then we have lost the upcall. > > We therefore also need to call gic_restore_pending_irqs on the exit to guest > path in order to pickup any newly inject IRQ and propagate it into a free LR.You forgot to mention that on exit to guest, before calling gic_restore_pending_irqs, we check whether we need to reinject the PPI. Aside from this, the patch is fine.> This doesn''t currently support bumping a lower priority interrupt out of the > LRs in order to inject a new higher priority interrupt. We don''t yet implement > interrupt prioritisation (and guests don''t use it either) so this will do for > now. > > Since gic_restore_pending_irqs is now called in the return to guest path it is > called with interrupts disabled and accordinly must use the irqsave/irqrestore > spinlock primitives. 
> > Signed-off-by: Ian Campbell <ian.campbell@citrix.com> > --- > xen/arch/arm/gic.c | 9 +++++++-- > 1 file changed, 7 insertions(+), 2 deletions(-) > > diff --git a/xen/arch/arm/gic.c b/xen/arch/arm/gic.c > index 6592562..59e007a 100644 > --- a/xen/arch/arm/gic.c > +++ b/xen/arch/arm/gic.c > @@ -556,17 +556,18 @@ static void gic_restore_pending_irqs(struct vcpu *v) > { > int i; > struct pending_irq *p, *t; > + unsigned long flags; > > list_for_each_entry_safe ( p, t, &v->arch.vgic.lr_pending, lr_queue ) > { > i = find_first_zero_bit(&this_cpu(lr_mask), nr_lrs); > if ( i >= nr_lrs ) return; > > - spin_lock_irq(&gic.lock); > + spin_lock_irqsave(&gic.lock, flags); > gic_set_lr(i, p->irq, GICH_LR_PENDING, p->priority); > list_del_init(&p->lr_queue); > set_bit(i, &this_cpu(lr_mask)); > - spin_unlock_irq(&gic.lock); > + spin_unlock_irqrestore(&gic.lock, flags); > } > > } > @@ -589,6 +590,10 @@ static void gic_inject_irq_stop(void) > > void gic_inject(void) > { > + if ( vcpu_info(current, evtchn_upcall_pending) ) > + vgic_vcpu_inject_irq(current, VGIC_IRQ_EVTCHN_CALLBACK, 1); > + > + gic_restore_pending_irqs(current); > if (!this_cpu(lr_mask)) > gic_inject_irq_stop(); > else > -- > 1.7.10.4 >
Stefano Stabellini
2013-Mar-19 17:28 UTC
Re: [PATCH 3/9] arm: gic: implement IPIs using SGI mechanism
On Wed, 6 Mar 2013, Ian Campbell wrote:> From: Ian Campbell <ian.campbell@citrix.com> > > Signed-off-by: Ian Campbell <ian.campbell@citrix.com> > --- > xen/arch/arm/arm32/mode_switch.S | 2 +- > xen/arch/arm/gic.c | 88 +++++++++++++++++++++++++++++++++++--- > xen/arch/arm/smp.c | 14 +++--- > xen/include/asm-arm/gic.h | 22 +++++++++- > 4 files changed, 108 insertions(+), 18 deletions(-) > > diff --git a/xen/arch/arm/arm32/mode_switch.S b/xen/arch/arm/arm32/mode_switch.S > index bc2be74..d6741d0 100644 > --- a/xen/arch/arm/arm32/mode_switch.S > +++ b/xen/arch/arm/arm32/mode_switch.S > @@ -43,7 +43,7 @@ kick_cpus: > mov r2, #0x1 > str r2, [r0, #(GICD_CTLR * 4)] /* enable distributor */ > mov r2, #0xfe0000 > - str r2, [r0, #(GICD_SGIR * 4)] /* send IPI to everybody */ > + str r2, [r0, #(GICD_SGIR * 4)] /* send IPI to everybody, SGI0 = Event check */ > dsb > str r2, [r0, #(GICD_CTLR * 4)] /* disable distributor */ > mov pc, lr > diff --git a/xen/arch/arm/gic.c b/xen/arch/arm/gic.c > index bbb8e04..6592562 100644 > --- a/xen/arch/arm/gic.c > +++ b/xen/arch/arm/gic.c > @@ -354,10 +354,55 @@ void __init gic_init(void) > spin_unlock(&gic.lock); > } > > +void send_SGI_mask(const cpumask_t *cpumask, enum gic_sgi sgi) > +{ > + unsigned long mask = cpumask_bits(cpumask)[0]; > + > + ASSERT(sgi < 16); /* There are only 16 SGIs */ > + > + mask &= cpumask_bits(&cpu_online_map)[0]; > + > + ASSERT(mask < 0x100); /* The target bitmap only supports 8 CPUs */ > + > + dsb(); > + > + GICD[GICD_SGIR] = GICD_SGI_TARGET_LIST > + | (mask<<GICD_SGI_TARGET_SHIFT) > + | GICD_SGI_GROUP1Are you sure that this is correct? The manual says: "This field is writable only by a Secure access. Any Non-secure write to the GICD_SGIR generates an SGI only if the specified SGI is programmed as Group 1, regardless of the value of bit[15] of the write." 
I think that we should leave bit 15 alone.> + | sgi; > +} > + > +void send_SGI_one(unsigned int cpu, enum gic_sgi sgi) > +{ > + ASSERT(cpu < 7); /* Targets bitmap only supports 8 CPUs */ > + send_SGI_mask(cpumask_of(cpu), sgi); > +} > + > +void send_SGI_self(enum gic_sgi sgi) > +{ > + ASSERT(sgi < 16); /* There are only 16 SGIs */ > + > + dsb(); > + > + GICD[GICD_SGIR] = GICD_SGI_TARGET_SELF > + | GICD_SGI_GROUP1same here> + | sgi; > +} > + > +void send_SGI_allbutself(enum gic_sgi sgi) > +{ > + ASSERT(sgi < 16); /* There are only 16 SGIs */ > + > + dsb(); > + > + GICD[GICD_SGIR] = GICD_SGI_TARGET_OTHERS > + | GICD_SGI_GROUP1same here> + | sgi; > +} > + > void smp_send_state_dump(unsigned int cpu) > { > - printk("WARNING: unable to send state dump request to CPU%d\n", cpu); > - /* XXX TODO -- send an SGI */ > + send_SGI_one(cpu, GIC_SGI_DUMP_STATE); > } > > /* Set up the per-CPU parts of the GIC for a secondary CPU */ > @@ -585,6 +630,28 @@ out: > return retval; > } > > +static void do_sgi(struct cpu_user_regs *regs, int othercpu, enum gic_sgi sgi) > +{ > + /* Lower the priority */ > + GICC[GICC_EOIR] = sgi; > + > + switch (sgi) > + { > + case GIC_SGI_EVENT_CHECK: > + /* Nothing to do, will check for events on return path */ > + break; > + case GIC_SGI_DUMP_STATE: > + dump_execstate(regs); > + break; > + default: > + panic("Unhandled SGI %d on CPU%d\n", sgi, smp_processor_id()); > + break; > + } > + > + /* Deactivate */ > + GICC[GICC_DIR] = sgi; > +} > + > /* Accept an interrupt from the GIC and dispatch its handler */ > void gic_interrupt(struct cpu_user_regs *regs, int is_fiq) > { > @@ -595,14 +662,23 @@ void gic_interrupt(struct cpu_user_regs *regs, int is_fiq) > do { > intack = GICC[GICC_IAR]; > irq = intack & GICC_IA_IRQ; > - local_irq_enable(); > > - if (likely(irq < 1021)) > + if ( likely(irq >= 16 && irq < 1021) ) > + { > + local_irq_enable(); > do_IRQ(regs, irq, is_fiq); > + local_irq_disable(); > + } > + else if (unlikely(irq < 16)) > + { > + unsigned int 
cpu = (intack & GICC_IA_CPU_MASK) >> GICC_IA_CPU_SHIFT; > + do_sgi(regs, cpu, irq); > + } > else > + { > + local_irq_disable(); > break; > - > - local_irq_disable(); > + } > } while (1); > } > > diff --git a/xen/arch/arm/smp.c b/xen/arch/arm/smp.c > index 12260f4..a902d84 100644 > --- a/xen/arch/arm/smp.c > +++ b/xen/arch/arm/smp.c > @@ -3,10 +3,11 @@ > #include <asm/smp.h> > #include <asm/cpregs.h> > #include <asm/page.h> > +#include <asm/gic.h> > > void flush_tlb_mask(const cpumask_t *mask) > { > - /* XXX IPI other processors */ > + /* No need to IPI other processors on ARM, the processor takes care of it. */ > flush_xen_data_tlb();From the ARMv7 manual, chapter B3.10.5: "For an ARMv7 implementation that does not include the Multiprocessing Extensions, the architecture defines that a TLB maintenance operation applies only to any TLBs that are used in translating memory accesses made by the processor performing the maintenance operation. The ARMv7 Multiprocessing Extensions are an OPTIONAL set of extensions that improve the implementation of a multiprocessor system. These extensions provide additional TLB maintenance operations that apply to the TLBs of processors in the same Inner Shareable domain." From this paragraph I understand that we wouldn''t need to do an IPI if flush_xen_data_tlb was implemented using TLBIALLHIS. But actually flush_xen_data_tlb uses TLBIALLH, that it seems not to propagate across an Inner Shareable domain. Did I misunderstand something?> } > > @@ -15,17 +16,12 @@ void smp_call_function( > void *info, > int wait) > { > - /* TODO: No SMP just now, does not include self so nothing to do. > - cpumask_t allbutself = cpu_online_map; > - cpu_clear(smp_processor_id(), allbutself); > - on_selected_cpus(&allbutself, func, info, wait); > - */ > + panic("%s not implmented\n", __func__); > } > + > void smp_send_event_check_mask(const cpumask_t *mask) > { > - /* TODO: No SMP just now, does not include self so nothing to do. 
> - send_IPI_mask(mask, EVENT_CHECK_VECTOR); > - */ > + send_SGI_mask(mask, GIC_SGI_EVENT_CHECK); > } > > /* > diff --git a/xen/include/asm-arm/gic.h b/xen/include/asm-arm/gic.h > index 6bf50bb..24c0d5c 100644 > --- a/xen/include/asm-arm/gic.h > +++ b/xen/include/asm-arm/gic.h > @@ -51,6 +51,13 @@ > #define GICD_SPENDSGIRN (0xF2C/4) > #define GICD_ICPIDR2 (0xFE8/4) > > +#define GICD_SGI_TARGET_LIST (0UL<<24) > +#define GICD_SGI_TARGET_OTHERS (1UL<<24) > +#define GICD_SGI_TARGET_SELF (2UL<<24) > +#define GICD_SGI_TARGET_SHIFT (16) > +#define GICD_SGI_TARGET_MASK (0xFFUL<<GICD_SGI_TARGET_SHIFT) > +#define GICD_SGI_GROUP1 (1UL<<15) > + > #define GICC_CTLR (0x0000/4) > #define GICC_PMR (0x0004/4) > #define GICC_BPR (0x0008/4) > @@ -83,8 +90,9 @@ > #define GICC_CTL_ENABLE 0x1 > #define GICC_CTL_EOI (0x1 << 9) > > -#define GICC_IA_IRQ 0x03ff > -#define GICC_IA_CPU 0x1c00 > +#define GICC_IA_IRQ 0x03ff > +#define GICC_IA_CPU_MASK 0x1c00 > +#define GICC_IA_CPU_SHIFT 10 > > #define GICH_HCR_EN (1 << 0) > #define GICH_HCR_UIE (1 << 1) > @@ -157,6 +165,16 @@ extern int gicv_setup(struct domain *d); > extern void gic_save_state(struct vcpu *v); > extern void gic_restore_state(struct vcpu *v); > > +/* SGI (AKA IPIs) */ > +enum gic_sgi { > + GIC_SGI_EVENT_CHECK = 0, > + GIC_SGI_DUMP_STATE = 1, > +}; > +extern void send_SGI_mask(const cpumask_t *cpumask, enum gic_sgi sgi); > +extern void send_SGI_one(unsigned int cpu, enum gic_sgi sgi); > +extern void send_SGI_self(enum gic_sgi sgi); > +extern void send_SGI_allbutself(enum gic_sgi sgi); > + > /* print useful debug info */ > extern void gic_dump_info(struct vcpu *v); > > -- > 1.7.10.4 >
Stefano Stabellini
2013-Mar-19 17:34 UTC
Re: [PATCH 2/9] arm: consolidate setup of hypervisor traps and second stage paging
On Wed, 6 Mar 2013, Ian Campbell wrote:> From: Ian Campbell <ian.campbell@citrix.com> > > In particular be sure to initisalise HCR_EL2 on secondary processors. > > Signed-off-by: Ian Campbell <ian.campbell@citrix.com>Acked-by: Stefano Stabellini <stefano.stabellini@eu.citrix.com>> xen/arch/arm/domain_build.c | 7 ------- > xen/arch/arm/mm.c | 10 ++++++++++ > xen/arch/arm/setup.c | 13 +++---------- > xen/arch/arm/smpboot.c | 5 +++-- > xen/arch/arm/traps.c | 11 +++++++++++ > xen/include/asm-arm/mm.h | 4 +++- > xen/include/asm-arm/processor.h | 2 ++ > 7 files changed, 32 insertions(+), 20 deletions(-) > > diff --git a/xen/arch/arm/domain_build.c b/xen/arch/arm/domain_build.c > index bdf41fa..f161845 100644 > --- a/xen/arch/arm/domain_build.c > +++ b/xen/arch/arm/domain_build.c > @@ -322,10 +322,6 @@ int construct_dom0(struct domain *d) > gic_route_irq_to_guest(d, 46, "lcd"); > gic_route_irq_to_guest(d, 47, "eth"); > > - /* Enable second stage translation */ > - WRITE_SYSREG(READ_SYSREG(HCR_EL2) | HCR_VM, HCR_EL2); > - isb(); > - > /* The following loads use the domain''s p2m */ > p2m_load_VTTBR(d); > > @@ -373,9 +369,6 @@ int construct_dom0(struct domain *d) > } > #endif > > - WRITE_SYSREG(HCR_PTW|HCR_BSU_OUTER|HCR_AMO|HCR_IMO|HCR_VM|HCR_TWI, HCR_EL2); > - isb(); > - > local_abort_enable(); > > return 0; > diff --git a/xen/arch/arm/mm.c b/xen/arch/arm/mm.c > index 9661f10..ba3140d 100644 > --- a/xen/arch/arm/mm.c > +++ b/xen/arch/arm/mm.c > @@ -251,6 +251,16 @@ void __init arch_init_memory(void) > BUG_ON(IS_ERR(dom_cow)); > } > > +void __cpuinit setup_virt_paging(void) > +{ > + /* Setup Stage 2 address translation */ > + /* SH0=00, ORGN0=IRGN0=01 > + * SL0=01 (Level-1) > + * T0SZ=(1)1000 = -8 (40 bit physical addresses) > + */ > + WRITE_SYSREG32(0x80002558, VTCR_EL2); isb(); > +} > + > /* Boot-time pagetable setup. 
> * Changes here may need matching changes in head.S */ > void __init setup_pagetables(unsigned long boot_phys_offset, paddr_t xen_paddr) > diff --git a/xen/arch/arm/setup.c b/xen/arch/arm/setup.c > index 566f36c..cfe3d94 100644 > --- a/xen/arch/arm/setup.c > +++ b/xen/arch/arm/setup.c > @@ -446,16 +446,9 @@ void __init start_xen(unsigned long boot_phys_offset, > set_current((struct vcpu *)0xfffff000); /* debug sanity */ > idle_vcpu[0] = current; > > - /* Setup Hyp vector base */ > - WRITE_SYSREG((vaddr_t)hyp_traps_vector, VBAR_EL2); > - isb(); > - > - /* Setup Stage 2 address translation */ > - /* SH0=00, ORGN0=IRGN0=01 > - * SL0=01 (Level-1) > - * T0SZ=(1)1000 = -8 (40 bit physical addresses) > - */ > - WRITE_SYSREG32(0x80002558, VTCR_EL2); isb(); > + init_traps(); > + > + setup_virt_paging(); > > enable_vfp(); > > diff --git a/xen/arch/arm/smpboot.c b/xen/arch/arm/smpboot.c > index b2af42e..1bebf86 100644 > --- a/xen/arch/arm/smpboot.c > +++ b/xen/arch/arm/smpboot.c > @@ -148,8 +148,9 @@ void __cpuinit start_secondary(unsigned long boot_phys_offset, > *c = boot_cpu_data; > identify_cpu(c); > > - /* Setup Hyp vector base */ > - WRITE_SYSREG((vaddr_t)hyp_traps_vector, VBAR_EL2); > + init_traps(); > + > + setup_virt_paging(); > > mmu_init_secondary_cpu(); > enable_vfp(); > diff --git a/xen/arch/arm/traps.c b/xen/arch/arm/traps.c > index cf28974..2cc31ab 100644 > --- a/xen/arch/arm/traps.c > +++ b/xen/arch/arm/traps.c > @@ -53,6 +53,17 @@ integer_param("debug_stack_lines", debug_stack_lines); > > #define stack_words_per_line 8 > > + > +void __cpuinit init_traps(void) > +{ > + /* Setup Hyp vector base */ > + WRITE_SYSREG((vaddr_t)hyp_traps_vector, VBAR_EL2); > + > + /* Setup hypervisor traps */ > + WRITE_SYSREG(HCR_PTW|HCR_BSU_OUTER|HCR_AMO|HCR_IMO|HCR_VM|HCR_TWI, HCR_EL2); > + isb(); > +} > + > asmlinkage void __div0(void) > { > printk("Division by zero in hypervisor.\n"); > diff --git a/xen/include/asm-arm/mm.h b/xen/include/asm-arm/mm.h > index 19e5bc2..4be383e 
100644 > --- a/xen/include/asm-arm/mm.h > +++ b/xen/include/asm-arm/mm.h > @@ -138,8 +138,10 @@ extern unsigned long total_pages; > > /* Boot-time pagetable setup */ > extern void setup_pagetables(unsigned long boot_phys_offset, paddr_t xen_paddr); > -/* MMU setup for seccondary CPUS (which already have paging enabled) */ > +/* MMU setup for secondary CPUS (which already have paging enabled) */ > extern void __cpuinit mmu_init_secondary_cpu(void); > +/* Second stage paging setup, to be called on all CPUs */ > +extern void __cpuinit setup_virt_paging(void); > /* Set up the xenheap: up to 1GB of contiguous, always-mapped memory. > * Base must be 32MB aligned and size a multiple of 32MB. */ > extern void setup_xenheap_mappings(unsigned long base_mfn, unsigned long nr_mfns); > diff --git a/xen/include/asm-arm/processor.h b/xen/include/asm-arm/processor.h > index 6fbe2fa..1681ebf 100644 > --- a/xen/include/asm-arm/processor.h > +++ b/xen/include/asm-arm/processor.h > @@ -353,6 +353,8 @@ union hsr { > #ifndef __ASSEMBLY__ > extern uint32_t hyp_traps_vector[]; > > +void init_traps(void); > + > void panic_PAR(uint64_t par); > > void show_execution_state(struct cpu_user_regs *regs); > -- > 1.7.10.4 >
On Wed, 6 Mar 2013, Ian Campbell wrote:> The following series makes host SMP work for both arm32 and arm64. > Tested in both cases with the models and 2 pCPUS. >I get the following error trying to boot Xen with this patch series applied and a 3.9-rc3 based Dom0 on the arm32 emulator: (XEN) Hypervisor Trap. HSR=0x7e00000 EC=0x1 IL=1 Syndrome=1e00000 (XEN) CPU0: Unexpected Trap: Hypervisor (XEN) ----[ Xen-4.3-unstable arm32 debug=y Not tainted ]---- (XEN) CPU: 0 (XEN) PC: c0017764 (XEN) CPSR: 60000193 MODE:32-bit Guest SVC (XEN) R0: c05f96a0 R1: 00000000 R2: 00000001 R3: 00000000 (XEN) R4: c04a2000 R5: c04cb148 R6: c03907d4 R7: c05f6500 (XEN) R8: 8000406a R9: 412fc0f0 R10:00000000 R11:00000000 R12:00000000 (XEN) USR: SP: 00000000 LR: 00000000 (XEN) SVC: SP: c04a3fb8 LR: c000ead0 SPSR:60000153 (XEN) ABT: SP: c04cb28c LR: c000d4e0 SPSR:600001d3 (XEN) UND: SP: c04cb298 LR: c04cb298 SPSR:00000000 (XEN) IRQ: SP: c04cb280 LR: c000d540 SPSR:600001d3 (XEN) FIQ: SP: 00000000 LR: 00000000 SPSR:00000000 (XEN) FIQ: R8: 00000000 R9: 00000000 R10:00000000 R11:00000000 R12:00000000 (XEN) (XEN) TTBR0 008000406a TTBR1 008000406a TCR 00000000 (XEN) SCTLR 10c53c7d (XEN) IFAR 00000000 DFAR 00000000 (XEN) (XEN) HTTBR ffeca000 (XEN) HDFAR c880082c (XEN) HIFAR 0 (XEN) HPFAR 2c0010 (XEN) HCR 00002835 (XEN) HSR 7e00000 (XEN) VTTBR 10000ffdfe000 (XEN) (XEN) DFSR 5 DFAR 0 (XEN) IFSR 0 IFAR 0 (XEN) (XEN) GUEST STACK GOES HERE
On Fri, 22 Mar 2013, Stefano Stabellini wrote:> On Wed, 6 Mar 2013, Ian Campbell wrote: > > The following series makes host SMP work for both arm32 and arm64. > > Tested in both cases with the models and 2 pCPUS. > > > > I get the following error trying to boot Xen with this patch series > applied and a 3.9-rc3 based Dom0 on the arm32 emulator:Never mind, I didn't have my WFI patch applied.
Ian Campbell
2013-Apr-09 14:39 UTC
Re: [PATCH 7/9] arm: vgic: fix race between evtchn upcall and evtchnop_send
On Tue, 2013-03-19 at 16:18 +0000, Stefano Stabellini wrote:> On Wed, 6 Mar 2013, Ian Campbell wrote: > > From: Ian Campbell <ian.campbell@citrix.com> > > > > On ARM the evtchn upcall is done by using a local PPI interrupt. However the > > guest will clear the evtchn_upcall_pending bit before it EOIs that PPI (which > > happens late). This means vgic_vcpu_inject_irq (called via > > vcpu_mark_events_pending) sees the PPI as in flight and ends up not reinjecting > > it, if this happens after the guest has finished its event channel processing > > loop but before the EOI then we have lost the upcall. > > > > We therefore also need to call gic_restore_pending_irqs on the exit to guest > > path in order to pickup any newly inject IRQ and propagate it into a free LR. > > You forgot to mention that on exit to guest, before calling > gic_restore_pending_irqs, we check whether we need to reinject the PPI.So I did. My Use of "We therefore.." suggests I''ve managed to drop a paragraph from the middle of the commit message, which probably would have mentioned this ;-)> Aside from this, the patch is fine.Thanks.> > > > This doesn''t currently support bumping a lower priority interrupt out of the > > LRs in order to inject a new higher priority interrupt. We don''t yet implement > > interrupt prioritisation (and guests don''t use it either) so this will do for > > now. > > > > Since gic_restore_pending_irqs is now called in the return to guest path it is > > called with interrupts disabled and accordinly must use the irqsave/irqrestore > > spinlock primitives. 
> > > > Signed-off-by: Ian Campbell <ian.campbell@citrix.com> > > --- > > xen/arch/arm/gic.c | 9 +++++++-- > > 1 file changed, 7 insertions(+), 2 deletions(-) > > > > diff --git a/xen/arch/arm/gic.c b/xen/arch/arm/gic.c > > index 6592562..59e007a 100644 > > --- a/xen/arch/arm/gic.c > > +++ b/xen/arch/arm/gic.c > > @@ -556,17 +556,18 @@ static void gic_restore_pending_irqs(struct vcpu *v) > > { > > int i; > > struct pending_irq *p, *t; > > + unsigned long flags; > > > > list_for_each_entry_safe ( p, t, &v->arch.vgic.lr_pending, lr_queue ) > > { > > i = find_first_zero_bit(&this_cpu(lr_mask), nr_lrs); > > if ( i >= nr_lrs ) return; > > > > - spin_lock_irq(&gic.lock); > > + spin_lock_irqsave(&gic.lock, flags); > > gic_set_lr(i, p->irq, GICH_LR_PENDING, p->priority); > > list_del_init(&p->lr_queue); > > set_bit(i, &this_cpu(lr_mask)); > > - spin_unlock_irq(&gic.lock); > > + spin_unlock_irqrestore(&gic.lock, flags); > > } > > > > } > > @@ -589,6 +590,10 @@ static void gic_inject_irq_stop(void) > > > > void gic_inject(void) > > { > > + if ( vcpu_info(current, evtchn_upcall_pending) ) > > + vgic_vcpu_inject_irq(current, VGIC_IRQ_EVTCHN_CALLBACK, 1); > > + > > + gic_restore_pending_irqs(current); > > if (!this_cpu(lr_mask)) > > gic_inject_irq_stop(); > > else > > -- > > 1.7.10.4 > >
Ian Campbell
2013-Apr-09 14:51 UTC
Re: [PATCH 3/9] arm: gic: implement IPIs using SGI mechanism
On Tue, 2013-03-19 at 17:28 +0000, Stefano Stabellini wrote:> On Wed, 6 Mar 2013, Ian Campbell wrote: > > From: Ian Campbell <ian.campbell@citrix.com> > > > > Signed-off-by: Ian Campbell <ian.campbell@citrix.com> > > --- > > xen/arch/arm/arm32/mode_switch.S | 2 +- > > xen/arch/arm/gic.c | 88 +++++++++++++++++++++++++++++++++++--- > > xen/arch/arm/smp.c | 14 +++--- > > xen/include/asm-arm/gic.h | 22 +++++++++- > > 4 files changed, 108 insertions(+), 18 deletions(-) > > > > diff --git a/xen/arch/arm/arm32/mode_switch.S b/xen/arch/arm/arm32/mode_switch.S > > index bc2be74..d6741d0 100644 > > --- a/xen/arch/arm/arm32/mode_switch.S > > +++ b/xen/arch/arm/arm32/mode_switch.S > > @@ -43,7 +43,7 @@ kick_cpus: > > mov r2, #0x1 > > str r2, [r0, #(GICD_CTLR * 4)] /* enable distributor */ > > mov r2, #0xfe0000 > > - str r2, [r0, #(GICD_SGIR * 4)] /* send IPI to everybody */ > > + str r2, [r0, #(GICD_SGIR * 4)] /* send IPI to everybody, SGI0 = Event check */ > > dsb > > str r2, [r0, #(GICD_CTLR * 4)] /* disable distributor */ > > mov pc, lr > > diff --git a/xen/arch/arm/gic.c b/xen/arch/arm/gic.c > > index bbb8e04..6592562 100644 > > --- a/xen/arch/arm/gic.c > > +++ b/xen/arch/arm/gic.c > > @@ -354,10 +354,55 @@ void __init gic_init(void) > > spin_unlock(&gic.lock); > > } > > > > +void send_SGI_mask(const cpumask_t *cpumask, enum gic_sgi sgi) > > +{ > > + unsigned long mask = cpumask_bits(cpumask)[0]; > > + > > + ASSERT(sgi < 16); /* There are only 16 SGIs */ > > + > > + mask &= cpumask_bits(&cpu_online_map)[0]; > > + > > + ASSERT(mask < 0x100); /* The target bitmap only supports 8 CPUs */ > > + > > + dsb(); > > + > > + GICD[GICD_SGIR] = GICD_SGI_TARGET_LIST > > + | (mask<<GICD_SGI_TARGET_SHIFT) > > + | GICD_SGI_GROUP1 > > Are you sure that this is correct? > > The manual says: "This field is writable only by a Secure access. 
Any > Non-secure write to the GICD_SGIR generates an SGI only if the specified > SGI is programmed as Group 1, regardless of the value of bit[15] of the > write."I think the "regardless of the value..." bit at the end makes it OK (but pointless) to include this bit, but...> I think that we should leave bit 15 alone.... I think this is a good idea.> > diff --git a/xen/arch/arm/smp.c b/xen/arch/arm/smp.c > > index 12260f4..a902d84 100644 > > --- a/xen/arch/arm/smp.c > > +++ b/xen/arch/arm/smp.c > > @@ -3,10 +3,11 @@ > > #include <asm/smp.h> > > #include <asm/cpregs.h> > > #include <asm/page.h> > > +#include <asm/gic.h> > > > > void flush_tlb_mask(const cpumask_t *mask) > > { > > - /* XXX IPI other processors */ > > + /* No need to IPI other processors on ARM, the processor takes care of it. */ > > flush_xen_data_tlb(); > > From the ARMv7 manual, chapter B3.10.5: > > "For an ARMv7 implementation that does not include the Multiprocessing > Extensions, the architecture defines that a TLB maintenance operation > applies only to any TLBs that are used in translating memory accesses > made by the processor performing the maintenance operation. > > The ARMv7 Multiprocessing Extensions are an OPTIONAL set of extensions > that improve the implementation of a multiprocessor system. These > extensions provide additional TLB maintenance operations that apply to > the TLBs of processors in the same Inner Shareable domain."NB that we only do SMP on systems with MP extensions. I suppose systems with multiple processors but not MP extensions are completely non-coherent ones. In this case the "other" processor looks more like device not another processor. e..g like an A class core with an M class core on the side or a device which happens to have a general purpose core in it.> From this paragraph I understand that we wouldn''t need to do an IPI if > flush_xen_data_tlb was implemented using TLBIALLHIS. 
But actually > flush_xen_data_tlb uses TLBIALLH, that it seems not to propagate across > an Inner Shareable domain. > Did I misunderstand something?TLBIALLH flushes further than TLBIALLHIS AIUI, so *H is always sufficient if *HIS is sufficient. Xen currently maps RAM (and does MMU walks etc) as outer-shareable but actually we should switch to inner-shareable since it will perform better (by not flushing so aggressively). FWIW Linux uses inner-shareable so I doubt we will see many platforms that we care about where we need to map stuff inner-shareable, even though the meaning of inner- and outer- is somewhat implementation defined. Ian.
On Wed, 2013-03-06 at 08:54 +0000, Ian Campbell wrote:> From: Ian Campbell <ian.campbell@citrix.com> > > The current cpuid is held in x22 not x12.Stefano, This one is lacking any Ack or comment, did you just miss it by mistake? Ian.> > Signed-off-by: Ian Campbell <ian.campbell@citrix.com> > --- > xen/arch/arm/arm64/head.S | 2 +- > 1 file changed, 1 insertion(+), 1 deletion(-) > > diff --git a/xen/arch/arm/arm64/head.S b/xen/arch/arm/arm64/head.S > index b7ab251..e5c00be 100644 > --- a/xen/arch/arm/arm64/head.S > +++ b/xen/arch/arm/arm64/head.S > @@ -307,7 +307,7 @@ paging: > dsb sy > ldr x0, =smp_up_cpu > ldr x1, [x0] /* Which CPU is being booted? */ > - cmp x1, x12 /* Is it us? */ > + cmp x1, x22 /* Is it us? */ > b.ne 1b > > launch:
Ian Campbell
2013-Apr-11 08:58 UTC
Re: [PATCH 1/9] arm: initialise VCPU SCTLR in vcpu_initialise
On Tue, 2013-03-19 at 15:58 +0000, Stefano Stabellini wrote:> On Wed, 6 Mar 2013, Ian Campbell wrote: > > From: Ian Campbell <ian.campbell@citrix.com> > > > > Ensuring a sane initial starting state for vcpus other than domain 0s. > > > > Signed-off-by: Ian Campbell <ian.campbell@citrix.com> > > Acked-by: Stefano Stabellini <stefano.stabellini@eu.citrix.com>Applied, thanks.
Ian Campbell
2013-Apr-11 08:58 UTC
Re: [PATCH 2/9] arm: consolidate setup of hypervisor traps and second stage paging
On Tue, 2013-03-19 at 17:34 +0000, Stefano Stabellini wrote:> On Wed, 6 Mar 2013, Ian Campbell wrote: > > From: Ian Campbell <ian.campbell@citrix.com> > > > > In particular be sure to initisalise HCR_EL2 on secondary processors. > > > > Signed-off-by: Ian Campbell <ian.campbell@citrix.com> > > Acked-by: Stefano Stabellini <stefano.stabellini@eu.citrix.com>Thanks applied. I had to adjust with s/HCR_TWI// since "xen/arm: trap guest WFI" is not yet in tree.
On Tue, 2013-03-19 at 16:06 +0000, Stefano Stabellini wrote:> On Wed, 6 Mar 2013, Ian Campbell wrote: > > From: Ian Campbell <ian.campbell@citrix.com> > > > > At least we were consistent, but lets be correct too > > > > Signed-off-by: Ian Campbell <ian.campbell@citrix.com> > > Acked-by: Stefano Stabellini <stefano.stabellini@eu.citrix.com>Applied, thanks.
Ian Campbell
2013-Apr-11 08:59 UTC
Re: [PATCH 6/9] arm: vgic: fix race in vgic_vcpu_inject_irq
On Tue, 2013-03-19 at 16:10 +0000, Stefano Stabellini wrote:> On Wed, 6 Mar 2013, Ian Campbell wrote: > > From: Ian Campbell <ian.campbell@citrix.com> > > > > The initial check for a still pending interrupt (!list_empty(&n->inflight)) > > needs to be covered by the vgic lock to avoid trying to insert the IRQ into the > > inflight list simultaneously on 2 pCPUS. Expand the area covered by the lock > > appropriately. > > > > Also consolidate the unlocks on the exit path into one location. > > > > Signed-off-by: Ian Campbell <ian.campbell@citrix.com> > > Acked-by: Stefano Stabellini <stefano.stabellini@eu.citrix.com>This depends on the trap WFI patch, because it turns out I made exactly the cleanup I suggested in my review of that patch ;-) Ian.
Ian Campbell
2013-Apr-11 08:59 UTC
Re: [PATCH 7/9] arm: vgic: fix race between evtchn upcall and evtchnop_send
On Tue, 2013-03-19 at 16:18 +0000, Stefano Stabellini wrote:> On Wed, 6 Mar 2013, Ian Campbell wrote: > > From: Ian Campbell <ian.campbell@citrix.com> > > > > On ARM the evtchn upcall is done by using a local PPI interrupt. However the > > guest will clear the evtchn_upcall_pending bit before it EOIs that PPI (which > > happens late). This means vgic_vcpu_inject_irq (called via > > vcpu_mark_events_pending) sees the PPI as in flight and ends up not reinjecting > > it, if this happens after the guest has finished its event channel processing > > loop but before the EOI then we have lost the upcall. > > > > We therefore also need to call gic_restore_pending_irqs on the exit to guest > > path in order to pickup any newly inject IRQ and propagate it into a free LR. > > You forgot to mention that on exit to guest, before calling > gic_restore_pending_irqs, we check whether we need to reinject the PPI.I inserted before "We therefore..." To fix this we need to check if an evtchn upcall is pending when returning the guest and if so reinject the PPI.> Aside from this, the patch is fine.And then I took this as an Acked-by and applied. Thanks
On Tue, 2013-03-19 at 16:05 +0000, Stefano Stabellini wrote:> On Wed, 6 Mar 2013, Ian Campbell wrote: > > From: Ian Campbell <ian.campbell@citrix.com> > > > > lr_mask is a uint64_t and so needs to be printed with PRIx64. > > > > Signed-off-by: Ian Campbell <ian.campbell@citrix.com> > > Acked-by: Stefano Stabellini <stefano.stabellini@eu.citrix.com>Applied, thanks. Ian
Ian Campbell
2013-Apr-11 09:00 UTC
Re: [PATCH 9/9] arm: tweak/improve logging for host SMP
On Tue, 2013-03-19 at 16:05 +0000, Stefano Stabellini wrote:> On Wed, 6 Mar 2013, Ian Campbell wrote: > > From: Ian Campbell <ian.campbell@citrix.com> > > > > Make the "CPU<n> booted" message fit in with the surrounding logging. > > > > Log which CPU is taking an unexpected trap. > > > > Signed-off-by: Ian Campbell <ian.campbell@citrix.com> > > Acked-by: Stefano Stabellini <stefano.stabellini@eu.citrix.com>Applied, thanks.
Ian Campbell
2013-Apr-11 15:53 UTC
Re: [PATCH 3/9] arm: gic: implement IPIs using SGI mechanism
On Tue, 2013-04-09 at 15:51 +0100, Ian Campbell wrote:> NB that we only do SMP on systems with MP extensions. Also the virt extensions require the MP extensions... Ian.
Ian Campbell
2013-Apr-11 16:17 UTC
Re: [PATCH 3/9] arm: gic: implement IPIs using SGI mechanism
On Tue, 2013-04-09 at 15:51 +0100, Ian Campbell wrote:> > > I think that we should leave bit 15 alone. > > ... I think this is a good idea.8<--------------------------------- From fe8f63c211f6f4a86f99dd72d770524a3816792f Mon Sep 17 00:00:00 2001 From: Ian Campbell <ian.campbell@citrix.com> Date: Wed, 27 Feb 2013 16:34:32 +0000 Subject: [PATCH] arm: gic: implement IPIs using SGI mechanism Signed-off-by: Ian Campbell <ian.campbell@citrix.com> --- v2: Drop GROUP1 bit, it has no meaning unless you are in secure mode. --- xen/arch/arm/arm32/mode_switch.S | 2 +- xen/arch/arm/gic.c | 85 +++++++++++++++++++++++++++++++++++--- xen/arch/arm/smp.c | 14 ++---- xen/include/asm-arm/gic.h | 22 +++++++++- 4 files changed, 105 insertions(+), 18 deletions(-) diff --git a/xen/arch/arm/arm32/mode_switch.S b/xen/arch/arm/arm32/mode_switch.S index bc2be74..d6741d0 100644 --- a/xen/arch/arm/arm32/mode_switch.S +++ b/xen/arch/arm/arm32/mode_switch.S @@ -43,7 +43,7 @@ kick_cpus: mov r2, #0x1 str r2, [r0, #(GICD_CTLR * 4)] /* enable distributor */ mov r2, #0xfe0000 - str r2, [r0, #(GICD_SGIR * 4)] /* send IPI to everybody */ + str r2, [r0, #(GICD_SGIR * 4)] /* send IPI to everybody, SGI0 = Event check */ dsb str r2, [r0, #(GICD_CTLR * 4)] /* disable distributor */ mov pc, lr diff --git a/xen/arch/arm/gic.c b/xen/arch/arm/gic.c index 12f2e7f..07c816b 100644 --- a/xen/arch/arm/gic.c +++ b/xen/arch/arm/gic.c @@ -356,10 +356,52 @@ void __init gic_init(void) spin_unlock(&gic.lock); } +void send_SGI_mask(const cpumask_t *cpumask, enum gic_sgi sgi) +{ + unsigned long mask = cpumask_bits(cpumask)[0]; + + ASSERT(sgi < 16); /* There are only 16 SGIs */ + + mask &= cpumask_bits(&cpu_online_map)[0]; + + ASSERT(mask < 0x100); /* The target bitmap only supports 8 CPUs */ + + dsb(); + + GICD[GICD_SGIR] = GICD_SGI_TARGET_LIST + | (mask<<GICD_SGI_TARGET_SHIFT) + | sgi; +} + +void send_SGI_one(unsigned int cpu, enum gic_sgi sgi) +{ + ASSERT(cpu < 7); /* Targets bitmap only supports 8 CPUs */ + 
send_SGI_mask(cpumask_of(cpu), sgi); +} + +void send_SGI_self(enum gic_sgi sgi) +{ + ASSERT(sgi < 16); /* There are only 16 SGIs */ + + dsb(); + + GICD[GICD_SGIR] = GICD_SGI_TARGET_SELF + | sgi; +} + +void send_SGI_allbutself(enum gic_sgi sgi) +{ + ASSERT(sgi < 16); /* There are only 16 SGIs */ + + dsb(); + + GICD[GICD_SGIR] = GICD_SGI_TARGET_OTHERS + | sgi; +} + void smp_send_state_dump(unsigned int cpu) { - printk("WARNING: unable to send state dump request to CPU%d\n", cpu); - /* XXX TODO -- send an SGI */ + send_SGI_one(cpu, GIC_SGI_DUMP_STATE); } /* Set up the per-CPU parts of the GIC for a secondary CPU */ @@ -592,6 +634,28 @@ out: return retval; } +static void do_sgi(struct cpu_user_regs *regs, int othercpu, enum gic_sgi sgi) +{ + /* Lower the priority */ + GICC[GICC_EOIR] = sgi; + + switch (sgi) + { + case GIC_SGI_EVENT_CHECK: + /* Nothing to do, will check for events on return path */ + break; + case GIC_SGI_DUMP_STATE: + dump_execstate(regs); + break; + default: + panic("Unhandled SGI %d on CPU%d\n", sgi, smp_processor_id()); + break; + } + + /* Deactivate */ + GICC[GICC_DIR] = sgi; +} + /* Accept an interrupt from the GIC and dispatch its handler */ void gic_interrupt(struct cpu_user_regs *regs, int is_fiq) { @@ -602,14 +666,23 @@ void gic_interrupt(struct cpu_user_regs *regs, int is_fiq) do { intack = GICC[GICC_IAR]; irq = intack & GICC_IA_IRQ; - local_irq_enable(); - if (likely(irq < 1021)) + if ( likely(irq >= 16 && irq < 1021) ) + { + local_irq_enable(); do_IRQ(regs, irq, is_fiq); + local_irq_disable(); + } + else if (unlikely(irq < 16)) + { + unsigned int cpu = (intack & GICC_IA_CPU_MASK) >> GICC_IA_CPU_SHIFT; + do_sgi(regs, cpu, irq); + } else + { + local_irq_disable(); break; - - local_irq_disable(); + } } while (1); } diff --git a/xen/arch/arm/smp.c b/xen/arch/arm/smp.c index 12260f4..a902d84 100644 --- a/xen/arch/arm/smp.c +++ b/xen/arch/arm/smp.c @@ -3,10 +3,11 @@ #include <asm/smp.h> #include <asm/cpregs.h> #include <asm/page.h> +#include 
<asm/gic.h> void flush_tlb_mask(const cpumask_t *mask) { - /* XXX IPI other processors */ + /* No need to IPI other processors on ARM, the processor takes care of it. */ flush_xen_data_tlb(); } @@ -15,17 +16,12 @@ void smp_call_function( void *info, int wait) { - /* TODO: No SMP just now, does not include self so nothing to do. - cpumask_t allbutself = cpu_online_map; - cpu_clear(smp_processor_id(), allbutself); - on_selected_cpus(&allbutself, func, info, wait); - */ + panic("%s not implmented\n", __func__); } + void smp_send_event_check_mask(const cpumask_t *mask) { - /* TODO: No SMP just now, does not include self so nothing to do. - send_IPI_mask(mask, EVENT_CHECK_VECTOR); - */ + send_SGI_mask(mask, GIC_SGI_EVENT_CHECK); } /* diff --git a/xen/include/asm-arm/gic.h b/xen/include/asm-arm/gic.h index 6bf50bb..24c0d5c 100644 --- a/xen/include/asm-arm/gic.h +++ b/xen/include/asm-arm/gic.h @@ -51,6 +51,13 @@ #define GICD_SPENDSGIRN (0xF2C/4) #define GICD_ICPIDR2 (0xFE8/4) +#define GICD_SGI_TARGET_LIST (0UL<<24) +#define GICD_SGI_TARGET_OTHERS (1UL<<24) +#define GICD_SGI_TARGET_SELF (2UL<<24) +#define GICD_SGI_TARGET_SHIFT (16) +#define GICD_SGI_TARGET_MASK (0xFFUL<<GICD_SGI_TARGET_SHIFT) +#define GICD_SGI_GROUP1 (1UL<<15) + #define GICC_CTLR (0x0000/4) #define GICC_PMR (0x0004/4) #define GICC_BPR (0x0008/4) @@ -83,8 +90,9 @@ #define GICC_CTL_ENABLE 0x1 #define GICC_CTL_EOI (0x1 << 9) -#define GICC_IA_IRQ 0x03ff -#define GICC_IA_CPU 0x1c00 +#define GICC_IA_IRQ 0x03ff +#define GICC_IA_CPU_MASK 0x1c00 +#define GICC_IA_CPU_SHIFT 10 #define GICH_HCR_EN (1 << 0) #define GICH_HCR_UIE (1 << 1) @@ -157,6 +165,16 @@ extern int gicv_setup(struct domain *d); extern void gic_save_state(struct vcpu *v); extern void gic_restore_state(struct vcpu *v); +/* SGI (AKA IPIs) */ +enum gic_sgi { + GIC_SGI_EVENT_CHECK = 0, + GIC_SGI_DUMP_STATE = 1, +}; +extern void send_SGI_mask(const cpumask_t *cpumask, enum gic_sgi sgi); +extern void send_SGI_one(unsigned int cpu, enum gic_sgi sgi); 
+extern void send_SGI_self(enum gic_sgi sgi); +extern void send_SGI_allbutself(enum gic_sgi sgi); + /* print useful debug info */ extern void gic_dump_info(struct vcpu *v); -- 1.7.2.5
Julien Grall
2013-Apr-11 22:40 UTC
Re: [PATCH 3/9] arm: gic: implement IPIs using SGI mechanism
On 04/11/2013 05:17 PM, Ian Campbell wrote:> @@ -15,17 +16,12 @@ void smp_call_function( > void *info, > int wait) > { > - /* TODO: No SMP just now, does not include self so nothing to do. > - cpumask_t allbutself = cpu_online_map; > - cpu_clear(smp_processor_id(), allbutself); > - on_selected_cpus(&allbutself, func, info, wait); > - */ > + panic("%s not implmented\n", __func__); > } > +Panic calls smp_call_function via the machine_halt function. So, if Xen panics, you will have an infinite loop which writes "%s not ....". For the moment, I think you should leave the function in its current state. I will try to make a patch to call a function on each CPU with ARM. Cheers, Julien
Ian Campbell
2013-Apr-12 08:00 UTC
Re: [PATCH 3/9] arm: gic: implement IPIs using SGI mechanism
On Thu, 2013-04-11 at 23:40 +0100, Julien Grall wrote:> On 04/11/2013 05:17 PM, Ian Campbell wrote: > > > @@ -15,17 +16,12 @@ void smp_call_function( > > void *info, > > int wait) > > { > > - /* TODO: No SMP just now, does not include self so nothing to do. > > - cpumask_t allbutself = cpu_online_map; > > - cpu_clear(smp_processor_id(), allbutself); > > - on_selected_cpus(&allbutself, func, info, wait); > > - */ > > + panic("%s not implmented\n", __func__); > > } > > + > > > > Panic calls smp_call_function via machine_halt function. > So, if Xen panics, you will have an infinite loop which writes "%s not > ....". > For the moment, I think you should let the function in its current state.I'll s/panic/printk/> I will try to make a patch to call a function on each CPU with ARM.Thanks. Ian.
Stefano Stabellini
2013-Apr-18 13:33 UTC
Re: [PATCH 4/9] arm64: correct secondary CPU bringup
On Thu, 11 Apr 2013, Ian Campbell wrote:> On Wed, 2013-03-06 at 08:54 +0000, Ian Campbell wrote: > > From: Ian Campbell <ian.campbell@citrix.com> > > > > The current cpuid is held in x22 not x12. > > Stefano, > > This one is lacking any Ack or comment, did you just miss it by mistake? >Yes. Acked-by: Stefano Stabellini <stefano.stabellini@eu.citrix.com>> > > > > Signed-off-by: Ian Campbell <ian.campbell@citrix.com> > > --- > > xen/arch/arm/arm64/head.S | 2 +- > > 1 file changed, 1 insertion(+), 1 deletion(-) > > > > diff --git a/xen/arch/arm/arm64/head.S b/xen/arch/arm/arm64/head.S > > index b7ab251..e5c00be 100644 > > --- a/xen/arch/arm/arm64/head.S > > +++ b/xen/arch/arm/arm64/head.S > > @@ -307,7 +307,7 @@ paging: > > dsb sy > > ldr x0, =smp_up_cpu > > ldr x1, [x0] /* Which CPU is being booted? */ > > - cmp x1, x12 /* Is it us? */ > > + cmp x1, x22 /* Is it us? */ > > b.ne 1b > > > > launch: > > >