Stefano Stabellini
2010-May-10 14:20 UTC
[Xen-devel] [PATCH 11/11] Support VIRQ_TIMER and pvclock on HVM, disable hpet
Signed-off-by: Stefano Stabellini <stefano.stabellini@eu.citrix.com>
---
 arch/x86/xen/enlighten.c         |   46 +++++++++++++++++++++++++++++++++++++-
 arch/x86/xen/time.c              |    2 +
 drivers/xen/manage.c             |    1 +
 include/xen/interface/features.h |    3 ++
 4 files changed, 51 insertions(+), 1 deletions(-)

diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
index 84b7a84..3362876 100644
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -58,6 +58,7 @@
 #include <asm/tlbflush.h>
 #include <asm/reboot.h>
 #include <asm/stackprotector.h>
+#include <asm/hpet.h>
 
 #include "xen-ops.h"
 #include "mmu.h"
@@ -1253,6 +1254,7 @@ static void init_shared_info(void)
 {
 	struct xen_add_to_physmap xatp;
 	static struct shared_info *shared_info_page = 0;
+	int cpu;
 
 	if (!shared_info_page)
 		shared_info_page = (struct shared_info *) alloc_bootmem_pages(PAGE_SIZE);
@@ -1267,7 +1269,48 @@ static void init_shared_info(void)
 
 	/* Don't do the full vcpu_info placement stuff until we have a
 	   possible map and a non-dummy shared_info. */
-	per_cpu(xen_vcpu, 0) = &HYPERVISOR_shared_info->vcpu_info[0];
+	/* This code is run at resume time so make sure all the online cpus
+	 * have xen_vcpu properly set */
+	for_each_online_cpu(cpu)
+		per_cpu(xen_vcpu, cpu) = &HYPERVISOR_shared_info->vcpu_info[cpu];
+}
+
+static void xen_hvm_setup_cpu_clockevents(void)
+{
+	int cpu = smp_processor_id();
+	xen_setup_timer(cpu);
+	per_cpu(xen_vcpu, cpu) = &HYPERVISOR_shared_info->vcpu_info[cpu];
+	xen_setup_cpu_clockevents();
+}
+
+static void init_hvm_time(void)
+{
+	int ret;
+	struct xen_hvm_param a;
+
+#ifdef CONFIG_SMP
+	/* vector callback is needed otherwise we cannot receive interrupts
+	 * on cpu > 0 */
+	if (!xen_have_vector_callback)
+		return;
+#endif
+	if (!xen_feature(XENFEAT_hvm_safe_pvclock)) {
+		printk(KERN_WARNING "Xen doesn't support pvclock on HVM, "
+				"disable pv timer\n");
+		return;
+	}
+
+	pv_time_ops = xen_time_ops;
+	x86_init.timers.timer_init = xen_time_init;
+	x86_init.timers.setup_percpu_clockev = x86_init_noop;
+	x86_cpuinit.setup_percpu_clockev = xen_hvm_setup_cpu_clockevents;
+
+	x86_platform.calibrate_tsc = xen_tsc_khz;
+	x86_platform.get_wallclock = xen_get_wallclock;
+	x86_platform.set_wallclock = xen_set_wallclock;
+
+	/* make sure hpet doesn't think that it is initialized */
+	hpet_address = 0L;
 }
 
 int xen_set_callback_via(uint64_t via)
@@ -1313,6 +1356,7 @@ void xen_guest_init(void)
 	}
 	have_vcpu_info_placement = 0;
 	x86_init.irqs.intr_init = xen_init_IRQ;
+	init_hvm_time();
 }
 
 static int __init parse_unplug(char *arg)
diff --git a/arch/x86/xen/time.c b/arch/x86/xen/time.c
index 0a5aa44..567aa2b 100644
--- a/arch/x86/xen/time.c
+++ b/arch/x86/xen/time.c
@@ -471,6 +471,8 @@ void xen_timer_resume(void)
 	for_each_online_cpu(cpu) {
 		if (HYPERVISOR_vcpu_op(VCPUOP_stop_periodic_timer, cpu, NULL))
 			BUG();
+		if (xen_hvm_domain())
+			setup_runstate_info(cpu);
 	}
 }
 
diff --git a/drivers/xen/manage.c b/drivers/xen/manage.c
index 6b911d0..9b54305 100644
--- a/drivers/xen/manage.c
+++ b/drivers/xen/manage.c
@@ -58,6 +58,7 @@ static int xen_hvm_suspend(void *data)
 	if (!*cancelled) {
 		xen_irq_resume();
 		platform_pci_resume_hook();
+		xen_timer_resume();
 	}
 
 	return 0;
diff --git a/include/xen/interface/features.h b/include/xen/interface/features.h
index 8ab08b9..70d2563 100644
--- a/include/xen/interface/features.h
+++ b/include/xen/interface/features.h
@@ -44,6 +44,9 @@
 /* x86: Does this Xen host support the HVM callback vector type? */
 #define XENFEAT_hvm_callback_vector 8
 
+/* x86: pvclock algorithm is safe to use on HVM */
+#define XENFEAT_hvm_safe_pvclock 9
+
 #define XENFEAT_NR_SUBMAPS 1
 
 #endif /* __XEN_PUBLIC_FEATURES_H__ */
-- 
1.5.4.3

_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xensource.com
http://lists.xensource.com/xen-devel
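
As a minimal sketch of how the new feature bit is consumed on the guest side (not part of the patch above): xen_setup_features() in drivers/xen/features.c fills xen_features[] from the XENVER_get_features hypercall, so the check in init_hvm_time() boils down to a helper like the hypothetical hvm_pvclock_usable() below, which returns true only when the host advertises the bit.

#include <linux/types.h>
#include <xen/features.h>	/* xen_feature(), XENFEAT_* */

/* Hypothetical helper, not in the patch: gate pv timer setup on the
 * host advertising that the pvclock algorithm is safe for HVM guests. */
static bool hvm_pvclock_usable(void)
{
	return xen_feature(XENFEAT_hvm_safe_pvclock);
}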