Stefano Stabellini
2013-May-08 11:40 UTC
[PATCH v8 0/7] xen/arm: guest SMP support, part two
Hi all, this patch series is just a rebase of the last set of unapplied patches from the original series. See each patch for a detailed changelog. Stefano Stabellini (7): xen: move VCPUOP_register_vcpu_info to common code xen/arm: support VCPUOP_register_vcpu_info. xen/arm: send IPIs to inject irqs into guest vcpus running on different pcpus xen/arm: run the vtimer Xen timers on the pcpu the vcpu is running on xen/arm: initialize virt_timer and phys_timer with the same values on all vcpus xen/arm: clear pending irq queues on do_psci_cpu_on xen/arm: initialize vtimer offset to CNTPCT xen/arch/arm/domain.c | 16 ++++++ xen/arch/arm/gic.c | 12 ++++ xen/arch/arm/psci.c | 3 + xen/arch/arm/traps.c | 6 ++ xen/arch/arm/vgic.c | 20 ++++++- xen/arch/arm/vtimer.c | 29 +++++++---- xen/arch/arm/vtimer.h | 1 + xen/arch/x86/domain.c | 113 --------------------------------------- xen/common/domain.c | 111 ++++++++++++++++++++++++++++++++++++++ xen/include/asm-arm/domain.h | 24 +++++--- xen/include/asm-arm/gic.h | 2 + xen/include/asm-arm/hypercall.h | 2 + xen/include/asm-x86/domain.h | 3 - xen/include/xen/domain.h | 3 + xen/include/xen/sched.h | 3 + 15 files changed, 211 insertions(+), 137 deletions(-) Cheers, Stefano
Stefano Stabellini
2013-May-08 11:41 UTC
[PATCH v8 1/7] xen: move VCPUOP_register_vcpu_info to common code
Move the implementation of VCPUOP_register_vcpu_info from x86 specific to commmon code. Move vcpu_info_mfn from an arch specific vcpu sub-field to the common vcpu struct. Move the initialization of vcpu_info_mfn to common code. Move unmap_vcpu_info and the call to unmap_vcpu_info at domain destruction time to common code. Signed-off-by: Stefano Stabellini <stefano.stabellini@eu.citrix.com> Acked-by: Ian Campbell <ian.campbell@citrix.com> Acked-by: Keir Fraser <keir@xen.org> CC: keir@xen.org CC: JBeulich@suse.com --- xen/arch/x86/domain.c | 113 ------------------------------------------ xen/common/domain.c | 111 +++++++++++++++++++++++++++++++++++++++++ xen/include/asm-x86/domain.h | 3 - xen/include/xen/domain.h | 3 + xen/include/xen/sched.h | 3 + 5 files changed, 117 insertions(+), 116 deletions(-) diff --git a/xen/arch/x86/domain.c b/xen/arch/x86/domain.c index db1e65d..0228db2 100644 --- a/xen/arch/x86/domain.c +++ b/xen/arch/x86/domain.c @@ -385,8 +385,6 @@ int vcpu_initialise(struct vcpu *v) vmce_init_vcpu(v); - v->arch.vcpu_info_mfn = INVALID_MFN; - if ( is_hvm_domain(d) ) { rc = hvm_vcpu_initialise(v); @@ -960,99 +958,6 @@ int arch_vcpu_reset(struct vcpu *v) return 0; } -/* - * Unmap the vcpu info page if the guest decided to place it somewhere - * else. This is only used from arch_domain_destroy, so there''s no - * need to do anything clever. - */ -static void -unmap_vcpu_info(struct vcpu *v) -{ - unsigned long mfn; - - if ( v->arch.vcpu_info_mfn == INVALID_MFN ) - return; - - mfn = v->arch.vcpu_info_mfn; - unmap_domain_page_global(v->vcpu_info); - - v->vcpu_info = &dummy_vcpu_info; - v->arch.vcpu_info_mfn = INVALID_MFN; - - put_page_and_type(mfn_to_page(mfn)); -} - -/* - * Map a guest page in and point the vcpu_info pointer at it. This - * makes sure that the vcpu_info is always pointing at a valid piece - * of memory, and it sets a pending event to make sure that a pending - * event doesn''t get missed. 
- */ -static int -map_vcpu_info(struct vcpu *v, unsigned long gfn, unsigned offset) -{ - struct domain *d = v->domain; - void *mapping; - vcpu_info_t *new_info; - struct page_info *page; - int i; - - if ( offset > (PAGE_SIZE - sizeof(vcpu_info_t)) ) - return -EINVAL; - - if ( v->arch.vcpu_info_mfn != INVALID_MFN ) - return -EINVAL; - - /* Run this command on yourself or on other offline VCPUS. */ - if ( (v != current) && !test_bit(_VPF_down, &v->pause_flags) ) - return -EINVAL; - - page = get_page_from_gfn(d, gfn, NULL, P2M_ALLOC); - if ( !page ) - return -EINVAL; - - if ( !get_page_type(page, PGT_writable_page) ) - { - put_page(page); - return -EINVAL; - } - - mapping = __map_domain_page_global(page); - if ( mapping == NULL ) - { - put_page_and_type(page); - return -ENOMEM; - } - - new_info = (vcpu_info_t *)(mapping + offset); - - if ( v->vcpu_info == &dummy_vcpu_info ) - { - memset(new_info, 0, sizeof(*new_info)); - __vcpu_info(v, new_info, evtchn_upcall_mask) = 1; - } - else - { - memcpy(new_info, v->vcpu_info, sizeof(*new_info)); - } - - v->vcpu_info = new_info; - v->arch.vcpu_info_mfn = page_to_mfn(page); - - /* Set new vcpu_info pointer /before/ setting pending flags. */ - wmb(); - - /* - * Mark everything as being pending just to make sure nothing gets - * lost. The domain will get a spurious event, but it can cope. 
- */ - vcpu_info(v, evtchn_upcall_pending) = 1; - for ( i = 0; i < BITS_PER_EVTCHN_WORD(d); i++ ) - set_bit(i, &vcpu_info(v, evtchn_pending_sel)); - - return 0; -} - long arch_do_vcpu_op( int cmd, struct vcpu *v, XEN_GUEST_HANDLE_PARAM(void) arg) @@ -1089,22 +994,6 @@ arch_do_vcpu_op( break; } - case VCPUOP_register_vcpu_info: - { - struct domain *d = v->domain; - struct vcpu_register_vcpu_info info; - - rc = -EFAULT; - if ( copy_from_guest(&info, arg, 1) ) - break; - - domain_lock(d); - rc = map_vcpu_info(v, info.mfn, info.offset); - domain_unlock(d); - - break; - } - /* * XXX Disable for 4.0.0: __update_vcpu_system_time() writes to the given * virtual address even when running in another domain''s address space. @@ -1971,8 +1860,6 @@ int domain_relinquish_resources(struct domain *d) ret = vcpu_destroy_pagetables(v); if ( ret ) return ret; - - unmap_vcpu_info(v); } if ( !is_hvm_domain(d) ) diff --git a/xen/common/domain.c b/xen/common/domain.c index 8adf00a..d0b2f2e 100644 --- a/xen/common/domain.c +++ b/xen/common/domain.c @@ -33,6 +33,7 @@ #include <xen/xenoprof.h> #include <xen/irq.h> #include <asm/debugger.h> +#include <asm/p2m.h> #include <asm/processor.h> #include <public/sched.h> #include <public/sysctl.h> @@ -142,6 +143,7 @@ struct vcpu *alloc_vcpu( v->vcpu_info = ((vcpu_id < XEN_LEGACY_MAX_VCPUS) ? (vcpu_info_t *)&shared_info(d, vcpu_info[vcpu_id]) : &dummy_vcpu_info); + v->vcpu_info_mfn = INVALID_MFN; init_waitqueue_vcpu(v); } @@ -513,6 +515,7 @@ int rcu_lock_live_remote_domain_by_id(domid_t dom, struct domain **d) int domain_kill(struct domain *d) { int rc = 0; + struct vcpu *v; if ( d == current->domain ) return -EINVAL; @@ -537,6 +540,8 @@ int domain_kill(struct domain *d) BUG_ON(rc != -EAGAIN); break; } + for_each_vcpu ( d, v ) + unmap_vcpu_info(v); d->is_dying = DOMDYING_dead; /* Mem event cleanup has to go here because the rings * have to be put before we call put_domain. 
*/ @@ -870,6 +875,96 @@ int vcpu_reset(struct vcpu *v) return rc; } +/* + * Map a guest page in and point the vcpu_info pointer at it. This + * makes sure that the vcpu_info is always pointing at a valid piece + * of memory, and it sets a pending event to make sure that a pending + * event doesn''t get missed. + */ +int map_vcpu_info(struct vcpu *v, unsigned long gfn, unsigned offset) +{ + struct domain *d = v->domain; + void *mapping; + vcpu_info_t *new_info; + struct page_info *page; + int i; + + if ( offset > (PAGE_SIZE - sizeof(vcpu_info_t)) ) + return -EINVAL; + + if ( v->vcpu_info_mfn != INVALID_MFN ) + return -EINVAL; + + /* Run this command on yourself or on other offline VCPUS. */ + if ( (v != current) && !test_bit(_VPF_down, &v->pause_flags) ) + return -EINVAL; + + page = get_page_from_gfn(d, gfn, NULL, P2M_ALLOC); + if ( !page ) + return -EINVAL; + + if ( !get_page_type(page, PGT_writable_page) ) + { + put_page(page); + return -EINVAL; + } + + mapping = __map_domain_page_global(page); + if ( mapping == NULL ) + { + put_page_and_type(page); + return -ENOMEM; + } + + new_info = (vcpu_info_t *)(mapping + offset); + + if ( v->vcpu_info == &dummy_vcpu_info ) + { + memset(new_info, 0, sizeof(*new_info)); + __vcpu_info(v, new_info, evtchn_upcall_mask) = 1; + } + else + { + memcpy(new_info, v->vcpu_info, sizeof(*new_info)); + } + + v->vcpu_info = new_info; + v->vcpu_info_mfn = page_to_mfn(page); + + /* Set new vcpu_info pointer /before/ setting pending flags. */ + wmb(); + + /* + * Mark everything as being pending just to make sure nothing gets + * lost. The domain will get a spurious event, but it can cope. + */ + vcpu_info(v, evtchn_upcall_pending) = 1; + for ( i = 0; i < BITS_PER_EVTCHN_WORD(d); i++ ) + set_bit(i, &vcpu_info(v, evtchn_pending_sel)); + + return 0; +} + +/* + * Unmap the vcpu info page if the guest decided to place it somewhere + * else. This is only used from arch_domain_destroy, so there''s no + * need to do anything clever. 
+ */ +void unmap_vcpu_info(struct vcpu *v) +{ + unsigned long mfn; + + if ( v->vcpu_info_mfn == INVALID_MFN ) + return; + + mfn = v->vcpu_info_mfn; + unmap_domain_page_global(v->vcpu_info); + + v->vcpu_info = &dummy_vcpu_info; + v->vcpu_info_mfn = INVALID_MFN; + + put_page_and_type(mfn_to_page(mfn)); +} long do_vcpu_op(int cmd, int vcpuid, XEN_GUEST_HANDLE_PARAM(void) arg) { @@ -994,6 +1089,22 @@ long do_vcpu_op(int cmd, int vcpuid, XEN_GUEST_HANDLE_PARAM(void) arg) break; + case VCPUOP_register_vcpu_info: + { + struct domain *d = v->domain; + struct vcpu_register_vcpu_info info; + + rc = -EFAULT; + if ( copy_from_guest(&info, arg, 1) ) + break; + + domain_lock(d); + rc = map_vcpu_info(v, info.mfn, info.offset); + domain_unlock(d); + + break; + } + #ifdef VCPU_TRAP_NMI case VCPUOP_send_nmi: if ( !guest_handle_is_null(arg) ) diff --git a/xen/include/asm-x86/domain.h b/xen/include/asm-x86/domain.h index 83fbe58..d79464d 100644 --- a/xen/include/asm-x86/domain.h +++ b/xen/include/asm-x86/domain.h @@ -436,9 +436,6 @@ struct arch_vcpu struct paging_vcpu paging; - /* Guest-specified relocation of vcpu_info. */ - unsigned long vcpu_info_mfn; - uint32_t gdbsx_vcpu_event; /* A secondary copy of the vcpu time info. 
*/ diff --git a/xen/include/xen/domain.h b/xen/include/xen/domain.h index 504a70f..a057069 100644 --- a/xen/include/xen/domain.h +++ b/xen/include/xen/domain.h @@ -52,6 +52,9 @@ void free_pirq_struct(void *); int vcpu_initialise(struct vcpu *v); void vcpu_destroy(struct vcpu *v); +int map_vcpu_info(struct vcpu *v, unsigned long gfn, unsigned offset); +void unmap_vcpu_info(struct vcpu *v); + int arch_domain_create(struct domain *d, unsigned int domcr_flags); void arch_domain_destroy(struct domain *d); diff --git a/xen/include/xen/sched.h b/xen/include/xen/sched.h index 5b55c09..ae6a3b8 100644 --- a/xen/include/xen/sched.h +++ b/xen/include/xen/sched.h @@ -192,6 +192,9 @@ struct vcpu struct waitqueue_vcpu *waitqueue_vcpu; + /* Guest-specified relocation of vcpu_info. */ + unsigned long vcpu_info_mfn; + struct arch_vcpu arch; }; -- 1.7.2.5
Stefano Stabellini
2013-May-08 11:41 UTC
[PATCH v8 2/7] xen/arm: support VCPUOP_register_vcpu_info.
We don't want to support the full vcpu_op hypercall interface, just VCPUOP_register_vcpu_info: introduce an internal ARM-only do_arm_vcpu_op function to filter out the vcpu_op hypercalls that we don't want to support. Call do_arm_vcpu_op instead of do_vcpu_op from traps.c. Signed-off-by: Stefano Stabellini <stefano.stabellini@eu.citrix.com> Acked-by: Keir Fraser <keir@xen.org> Acked-by: Ian Campbell <ian.campbell@citrix.com> CC: keir@xen.org CC: JBeulich@suse.com Changes in v4: - introduce an HYPERCALL_ARM macro and use it for do_arm_vcpu_op. Changes in v3: - do not export all the vcpu_op hypercalls to ARM guests, only VCPUOP_register_vcpu_info. --- xen/arch/arm/domain.c | 13 +++++++++++++ xen/arch/arm/traps.c | 6 ++++++ xen/include/asm-arm/hypercall.h | 2 ++ 3 files changed, 21 insertions(+), 0 deletions(-) diff --git a/xen/arch/arm/domain.c b/xen/arch/arm/domain.c index 141aa0b..df42d82 100644 --- a/xen/arch/arm/domain.c +++ b/xen/arch/arm/domain.c @@ -10,6 +10,7 @@ * GNU General Public License for more details.
*/ #include <xen/config.h> +#include <xen/hypercall.h> #include <xen/init.h> #include <xen/lib.h> #include <xen/sched.h> @@ -629,6 +630,18 @@ void arch_dump_domain_info(struct domain *d) } } + +long do_arm_vcpu_op(int cmd, int vcpuid, XEN_GUEST_HANDLE_PARAM(void) arg) +{ + switch ( cmd ) + { + case VCPUOP_register_vcpu_info: + return do_vcpu_op(cmd, vcpuid, arg); + default: + return -EINVAL; + } +} + long arch_do_vcpu_op(int cmd, struct vcpu *v, XEN_GUEST_HANDLE_PARAM(void) arg) { return -ENOSYS; diff --git a/xen/arch/arm/traps.c b/xen/arch/arm/traps.c index c743f2c..83a7fbc 100644 --- a/xen/arch/arm/traps.c +++ b/xen/arch/arm/traps.c @@ -653,6 +653,11 @@ typedef struct { .nr_args = _nr_args, \ } +#define HYPERCALL_ARM(_name, _nr_args) \ + [ __HYPERVISOR_ ## _name ] = { \ + .fn = (arm_hypercall_fn_t) &do_arm_ ## _name, \ + .nr_args = _nr_args, \ + } static arm_hypercall_t arm_hypercall_table[] = { HYPERCALL(memory_op, 2), HYPERCALL(domctl, 1), @@ -665,6 +670,7 @@ static arm_hypercall_t arm_hypercall_table[] = { HYPERCALL(sysctl, 2), HYPERCALL(hvm_op, 2), HYPERCALL(grant_table_op, 3), + HYPERCALL_ARM(vcpu_op, 3), }; #define __PSCI_cpu_suspend 0 diff --git a/xen/include/asm-arm/hypercall.h b/xen/include/asm-arm/hypercall.h index 0833ec4..3327a96 100644 --- a/xen/include/asm-arm/hypercall.h +++ b/xen/include/asm-arm/hypercall.h @@ -4,6 +4,8 @@ #include <public/domctl.h> /* for arch_do_domctl */ int do_physdev_op(int cmd, XEN_GUEST_HANDLE_PARAM(void) arg); +long do_arm_vcpu_op(int cmd, int vcpuid, XEN_GUEST_HANDLE_PARAM(void) arg); + #endif /* __ASM_ARM_HYPERCALL_H__ */ /* * Local variables: -- 1.7.2.5
Stefano Stabellini
2013-May-08 11:41 UTC
[PATCH v8 3/7] xen/arm: send IPIs to inject irqs into guest vcpus running on different pcpus
If we need to inject an irq into a VCPU that is running on a different processor, we shouldn't just enqueue the irq into the lr_pending and inflight lists and wait for something to interrupt the guest execution. Send an IPI to the target pcpu so that Xen can inject the new interrupt returning to guest. Signed-off-by: Stefano Stabellini <stefano.stabellini@eu.citrix.com> Acked-by: Ian Campbell <ian.campbell@citrix.com> Changes in v4: - check for is_running before vcpu_unblock; - use smp_send_event_check_cpu instead of smp_send_event_check_mask. --- xen/arch/arm/vgic.c | 4 ++++ 1 files changed, 4 insertions(+), 0 deletions(-) diff --git a/xen/arch/arm/vgic.c b/xen/arch/arm/vgic.c index 5d242c8..0e9cc4a 100644 --- a/xen/arch/arm/vgic.c +++ b/xen/arch/arm/vgic.c @@ -648,6 +648,7 @@ void vgic_vcpu_inject_irq(struct vcpu *v, unsigned int irq, int virtual) struct vgic_irq_rank *rank = vgic_irq_rank(v, 8, idx); struct pending_irq *iter, *n = irq_to_pending(v, irq); unsigned long flags; + bool_t running; spin_lock_irqsave(&v->arch.vgic.lock, flags); @@ -683,7 +684,10 @@ void vgic_vcpu_inject_irq(struct vcpu *v, unsigned int irq, int virtual) out: spin_unlock_irqrestore(&v->arch.vgic.lock, flags); /* we have a new higher priority irq, inject it into the guest */ + running = v->is_running; vcpu_unblock(v); + if ( running && v != current ) + smp_send_event_check_mask(cpumask_of(v->processor)); } /* -- 1.7.2.5
Stefano Stabellini
2013-May-08 11:41 UTC
[PATCH v8 4/7] xen/arm: run the vtimer Xen timers on the pcpu the vcpu is running on
The Xen physical timer emulator and virtual timer driver use two internal Xen timers: initialize them on the pcpu the vcpu is running on, rather than the processor that is creating the vcpu. On vcpu restore migrate the phys_timer and the virt_timer to the pcpu the vcpu is running on. Signed-off-by: Stefano Stabellini <stefano.stabellini@eu.citrix.com> Acked-by: Ian Campbell <ian.campbell@citrix.com> Changes in v4: - migrate Xen timers on virt_timer_restore. --- xen/arch/arm/vtimer.c | 6 ++++-- 1 files changed, 4 insertions(+), 2 deletions(-) diff --git a/xen/arch/arm/vtimer.c b/xen/arch/arm/vtimer.c index 1cb365e..393aac3 100644 --- a/xen/arch/arm/vtimer.c +++ b/xen/arch/arm/vtimer.c @@ -48,7 +48,7 @@ int vcpu_vtimer_init(struct vcpu *v) { struct vtimer *t = &v->arch.phys_timer; - init_timer(&t->timer, phys_timer_expired, t, smp_processor_id()); + init_timer(&t->timer, phys_timer_expired, t, v->processor); t->ctl = 0; t->offset = NOW(); t->cval = NOW(); @@ -56,7 +56,7 @@ int vcpu_vtimer_init(struct vcpu *v) t->v = v; t = &v->arch.virt_timer; - init_timer(&t->timer, virt_timer_expired, t, smp_processor_id()); + init_timer(&t->timer, virt_timer_expired, t, v->processor); t->ctl = 0; t->offset = READ_SYSREG64(CNTVCT_EL0) + READ_SYSREG64(CNTVOFF_EL2); t->cval = 0; @@ -95,6 +95,8 @@ int virt_timer_restore(struct vcpu *v) return 0; stop_timer(&v->arch.virt_timer.timer); + migrate_timer(&v->arch.virt_timer.timer, v->processor); + migrate_timer(&v->arch.phys_timer.timer, v->processor); WRITE_SYSREG64(v->arch.virt_timer.offset, CNTVOFF_EL2); WRITE_SYSREG64(v->arch.virt_timer.cval, CNTV_CVAL_EL0); -- 1.7.2.5
Stefano Stabellini
2013-May-08 11:41 UTC
[PATCH v8 5/7] xen/arm: initialize virt_timer and phys_timer with the same values on all vcpus
Introduce a domain wide vtimer initialization function to initialize the phys_timer and the virt_timer offsets. Use the domain phys_timer and virt_timer offsets throughout the vtimer code instead of the per-vcpu offsets. Remove the per-vcpu offsets from struct vtimer altogether. Signed-off-by: Stefano Stabellini <stefano.stabellini@eu.citrix.com> Acked-by: Ian Campbell <ian.campbell@citrix.com> Changes in v5: - don''t change the init value of phys_timer''s cval. Changes in v4: - introduce vcpu_domain_init; - inline phys_timer_base and virt_timer_base in arch_domain; - use phys_timer_base.offset and virt_timer_base.offset directly in vtimer code (remove offset field from struct vtimer). --- xen/arch/arm/domain.c | 3 +++ xen/arch/arm/vtimer.c | 24 ++++++++++++++++-------- xen/arch/arm/vtimer.h | 1 + xen/include/asm-arm/domain.h | 24 +++++++++++++++--------- 4 files changed, 35 insertions(+), 17 deletions(-) diff --git a/xen/arch/arm/domain.c b/xen/arch/arm/domain.c index df42d82..b22d902 100644 --- a/xen/arch/arm/domain.c +++ b/xen/arch/arm/domain.c @@ -485,6 +485,9 @@ int arch_domain_create(struct domain *d, unsigned int domcr_flags) if ( (rc = domain_vgic_init(d)) != 0 ) goto fail; + if ( (rc = vcpu_domain_init(d)) != 0 ) + goto fail; + /* Domain 0 gets a real UART not an emulated one */ if ( d->domain_id && (rc = domain_uart0_init(d)) != 0 ) goto fail; diff --git a/xen/arch/arm/vtimer.c b/xen/arch/arm/vtimer.c index 393aac3..97fe8ce 100644 --- a/xen/arch/arm/vtimer.c +++ b/xen/arch/arm/vtimer.c @@ -44,13 +44,20 @@ static void virt_timer_expired(void *data) vgic_vcpu_inject_irq(t->v, 27, 1); } +int vcpu_domain_init(struct domain *d) +{ + d->arch.phys_timer_base.offset = NOW(); + d->arch.virt_timer_base.offset = READ_SYSREG64(CNTVCT_EL0) + + READ_SYSREG64(CNTVOFF_EL2); + return 0; +} + int vcpu_vtimer_init(struct vcpu *v) { struct vtimer *t = &v->arch.phys_timer; init_timer(&t->timer, phys_timer_expired, t, v->processor); t->ctl = 0; - t->offset = NOW(); t->cval = 
NOW(); t->irq = 30; t->v = v; @@ -58,7 +65,6 @@ int vcpu_vtimer_init(struct vcpu *v) t = &v->arch.virt_timer; init_timer(&t->timer, virt_timer_expired, t, v->processor); t->ctl = 0; - t->offset = READ_SYSREG64(CNTVCT_EL0) + READ_SYSREG64(CNTVOFF_EL2); t->cval = 0; t->irq = 27; t->v = v; @@ -84,7 +90,7 @@ int virt_timer_save(struct vcpu *v) !(v->arch.virt_timer.ctl & CNTx_CTL_MASK)) { set_timer(&v->arch.virt_timer.timer, ticks_to_ns(v->arch.virt_timer.cval + - v->arch.virt_timer.offset - boot_count)); + v->domain->arch.virt_timer_base.offset - boot_count)); } return 0; } @@ -98,7 +104,7 @@ int virt_timer_restore(struct vcpu *v) migrate_timer(&v->arch.virt_timer.timer, v->processor); migrate_timer(&v->arch.phys_timer.timer, v->processor); - WRITE_SYSREG64(v->arch.virt_timer.offset, CNTVOFF_EL2); + WRITE_SYSREG64(v->domain->arch.virt_timer_base.offset, CNTVOFF_EL2); WRITE_SYSREG64(v->arch.virt_timer.cval, CNTV_CVAL_EL0); WRITE_SYSREG32(v->arch.virt_timer.ctl, CNTV_CTL_EL0); return 0; @@ -128,7 +134,8 @@ static int vtimer_emulate_32(struct cpu_user_regs *regs, union hsr hsr) if ( v->arch.phys_timer.ctl & CNTx_CTL_ENABLE ) { set_timer(&v->arch.phys_timer.timer, - v->arch.phys_timer.cval + v->arch.phys_timer.offset); + v->arch.phys_timer.cval + + v->domain->arch.phys_timer_base.offset); } else stop_timer(&v->arch.phys_timer.timer); @@ -137,7 +144,7 @@ static int vtimer_emulate_32(struct cpu_user_regs *regs, union hsr hsr) return 1; case HSR_CPREG32(CNTP_TVAL): - now = NOW() - v->arch.phys_timer.offset; + now = NOW() - v->domain->arch.phys_timer_base.offset; if ( cp32.read ) { *r = (uint32_t)(ns_to_ticks(v->arch.phys_timer.cval - now) & 0xffffffffull); @@ -149,7 +156,8 @@ static int vtimer_emulate_32(struct cpu_user_regs *regs, union hsr hsr) { v->arch.phys_timer.ctl &= ~CNTx_CTL_PENDING; set_timer(&v->arch.phys_timer.timer, - v->arch.phys_timer.cval + v->arch.phys_timer.offset); + v->arch.phys_timer.cval + + v->domain->arch.phys_timer_base.offset); } } @@ -174,7 +182,7 
@@ static int vtimer_emulate_64(struct cpu_user_regs *regs, union hsr hsr) case HSR_CPREG64(CNTPCT): if ( cp64.read ) { - now = NOW() - v->arch.phys_timer.offset; + now = NOW() - v->domain->arch.phys_timer_base.offset; ticks = ns_to_ticks(now); *r1 = (uint32_t)(ticks & 0xffffffff); *r2 = (uint32_t)(ticks >> 32); diff --git a/xen/arch/arm/vtimer.h b/xen/arch/arm/vtimer.h index 690231d..bcf910e 100644 --- a/xen/arch/arm/vtimer.h +++ b/xen/arch/arm/vtimer.h @@ -20,6 +20,7 @@ #ifndef __ARCH_ARM_VTIMER_H__ #define __ARCH_ARM_VTIMER_H__ +extern int vcpu_domain_init(struct domain *d); extern int vcpu_vtimer_init(struct vcpu *v); extern int vtimer_emulate(struct cpu_user_regs *regs, union hsr hsr); extern int virt_timer_save(struct vcpu *v); diff --git a/xen/include/asm-arm/domain.h b/xen/include/asm-arm/domain.h index 3fa266c2..cca7416 100644 --- a/xen/include/asm-arm/domain.h +++ b/xen/include/asm-arm/domain.h @@ -47,6 +47,14 @@ enum domain_type { #define is_pv64_domain(d) (0) #endif +struct vtimer { + struct vcpu *v; + int irq; + struct timer timer; + uint32_t ctl; + uint64_t cval; +}; + struct arch_domain { #ifdef CONFIG_ARM_64 @@ -62,6 +70,13 @@ struct arch_domain register_t vmpidr; struct { + uint64_t offset; + } phys_timer_base; + struct { + uint64_t offset; + } virt_timer_base; + + struct { /* * Covers access to other members of this struct _except_ for * shared_irqs where each member contains its own locking. @@ -91,15 +106,6 @@ struct arch_domain } __cacheline_aligned; -struct vtimer { - struct vcpu *v; - int irq; - struct timer timer; - uint32_t ctl; - uint64_t offset; - uint64_t cval; -}; - struct arch_vcpu { struct { -- 1.7.2.5
Stefano Stabellini
2013-May-08 11:41 UTC
[PATCH v8 6/7] xen/arm: clear pending irq queues on do_psci_cpu_on
Don't inject irqs to vcpus that are down. Also when (re)activating a vcpu, clear the vgic and gic irq queues: we don't want to inject any irqs that couldn't be handled by the vcpu right before going offline. Changes in v7: - call gic_clear_pending_irqs before clearing VPF_down on do_psci_cpu_on. Signed-off-by: Stefano Stabellini <stefano.stabellini@eu.citrix.com> Acked-by: Ian Campbell <ian.campbell@citrix.com> --- xen/arch/arm/gic.c | 12 ++++++++++++ xen/arch/arm/psci.c | 3 +++ xen/arch/arm/vgic.c | 16 ++++++++++++-- xen/include/asm-arm/gic.h | 2 ++ 4 files changed, 31 insertions(+), 2 deletions(-) diff --git a/xen/arch/arm/gic.c b/xen/arch/arm/gic.c index 2a44cf8..8d35e0e 100644 --- a/xen/arch/arm/gic.c +++ b/xen/arch/arm/gic.c @@ -576,6 +576,18 @@ static void gic_restore_pending_irqs(struct vcpu *v) } +void gic_clear_pending_irqs(struct vcpu *v) +{ + struct pending_irq *p, *t; + unsigned long flags; + + spin_lock_irqsave(&gic.lock, flags); + v->arch.lr_mask = 0; + list_for_each_entry_safe ( p, t, &v->arch.vgic.lr_pending, lr_queue ) + list_del_init(&p->lr_queue); + spin_unlock_irqrestore(&gic.lock, flags); +} + static void gic_inject_irq_start(void) { register_t hcr = READ_SYSREG(HCR_EL2); diff --git a/xen/arch/arm/psci.c b/xen/arch/arm/psci.c index 1761791..18feead 100644 --- a/xen/arch/arm/psci.c +++ b/xen/arch/arm/psci.c @@ -15,6 +15,7 @@ #include <xen/types.h> #include <asm/current.h> +#include <asm/gic.h> #include <asm/psci.h> int do_psci_cpu_on(uint32_t vcpuid, register_t entry_point) @@ -33,6 +34,8 @@ int do_psci_cpu_on(uint32_t vcpuid, register_t entry_point) if ( (ctxt = alloc_vcpu_guest_context()) == NULL ) return PSCI_DENIED; + vgic_clear_pending_irqs(v); + memset(ctxt, 0, sizeof(*ctxt)); ctxt->user_regs.pc64 = (u64) entry_point; ctxt->sctlr = SCTLR_BASE; diff --git a/xen/arch/arm/vgic.c b/xen/arch/arm/vgic.c index 0e9cc4a..f9c1a6b 100644 --- a/xen/arch/arm/vgic.c +++ b/xen/arch/arm/vgic.c @@ -641,6 +641,18 @@ struct pending_irq 
*irq_to_pending(struct vcpu *v, unsigned int irq) return n; } +void vgic_clear_pending_irqs(struct vcpu *v) +{ + struct pending_irq *p, *t; + unsigned long flags; + + spin_lock_irqsave(&v->arch.vgic.lock, flags); + list_for_each_entry_safe ( p, t, &v->arch.vgic.inflight_irqs, inflight ) + list_del_init(&p->inflight); + gic_clear_pending_irqs(v); + spin_unlock_irqrestore(&v->arch.vgic.lock, flags); +} + void vgic_vcpu_inject_irq(struct vcpu *v, unsigned int irq, int virtual) { int idx = irq >> 2, byte = irq & 0x3; @@ -652,8 +664,8 @@ void vgic_vcpu_inject_irq(struct vcpu *v, unsigned int irq, int virtual) spin_lock_irqsave(&v->arch.vgic.lock, flags); - /* irq already pending */ - if (!list_empty(&n->inflight)) + /* vcpu offline or irq already pending */ + if (test_bit(_VPF_down, &v->pause_flags) || !list_empty(&n->inflight)) { spin_unlock_irqrestore(&v->arch.vgic.lock, flags); return; diff --git a/xen/include/asm-arm/gic.h b/xen/include/asm-arm/gic.h index 0f9f74b..84ebc83 100644 --- a/xen/include/asm-arm/gic.h +++ b/xen/include/asm-arm/gic.h @@ -140,12 +140,14 @@ extern void domain_vgic_free(struct domain *d); extern int vcpu_vgic_init(struct vcpu *v); extern void vgic_vcpu_inject_irq(struct vcpu *v, unsigned int irq,int virtual); +extern void vgic_clear_pending_irqs(struct vcpu *v); extern struct pending_irq *irq_to_pending(struct vcpu *v, unsigned int irq); extern void gic_route_ppis(void); extern void gic_route_spis(void); extern void gic_inject(void); +extern void gic_clear_pending_irqs(struct vcpu *v); extern int gic_events_need_delivery(void); extern void __cpuinit init_maintenance_interrupt(void); -- 1.7.2.5
Stefano Stabellini
2013-May-08 11:41 UTC
[PATCH v8 7/7] xen/arm: initialize vtimer offset to CNTPCT
Currently we initialize the vtimer offset to CNTVCT + CNTVOFF = CNTPCT - CNTVOFF + CNTVOFF = CNTPCT Simply initialize vtimer offset to CNTPCT. Signed-off-by: Stefano Stabellini <stefano.stabellini@eu.citrix.com> Acked-by: Ian Campbell <ian.campbell@citrix.com> --- xen/arch/arm/vtimer.c | 3 +-- 1 files changed, 1 insertions(+), 2 deletions(-) diff --git a/xen/arch/arm/vtimer.c b/xen/arch/arm/vtimer.c index 97fe8ce..6993425 100644 --- a/xen/arch/arm/vtimer.c +++ b/xen/arch/arm/vtimer.c @@ -47,8 +47,7 @@ static void virt_timer_expired(void *data) int vcpu_domain_init(struct domain *d) { d->arch.phys_timer_base.offset = NOW(); - d->arch.virt_timer_base.offset = READ_SYSREG64(CNTVCT_EL0) + - READ_SYSREG64(CNTVOFF_EL2); + d->arch.virt_timer_base.offset = READ_SYSREG64(CNTPCT_EL0); return 0; } -- 1.7.2.5