search for: kvm_guest_init

Displaying 12 results from an estimated 86 matches for "kvm_guest_init".

2008 Apr 16
1
[PATCH] kvm: move kvmclock initialization inside kvm_guest_init
...clock_init(); -#endif - #ifdef CONFIG_VMI /* * Must be after max_low_pfn is determined, and before kernel diff --git a/include/linux/kvm_para.h b/include/linux/kvm_para.h index 3ddce03..c5e662c 100644 --- a/include/linux/kvm_para.h +++ b/include/linux/kvm_para.h @@ -28,6 +28,11 @@ void __init kvm_guest_init(void); #else #define kvm_guest_init() do { } while (0) #endif +#ifdef CONFIG_KVM_CLOCK +void kvmclock_init(void); +#else +#define kvmclock_init() do { } while (0) +#endif static inline int kvm_para_has_feature(unsigned int feature) { -- 1.5.0.6
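The stub half of this diff is the usual compile-away pattern: with CONFIG_KVM_CLOCK off, kvmclock_init() expands to an empty statement, so callers need no #ifdef of their own. A minimal sketch of the resulting call site (the kvm_guest_init() body below is an assumption for illustration, not part of the diff):

    #ifdef CONFIG_KVM_CLOCK
    void kvmclock_init(void);
    #else
    #define kvmclock_init() do { } while (0)    /* compiles away */
    #endif

    void __init kvm_guest_init(void)
    {
            if (!kvm_para_available())
                    return;

            kvmclock_init();    /* safe to call unconditionally */
            /* ... remaining paravirt guest setup ... */
    }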
2019 May 27
3
[RFC PATCH 5/6] x86/mm/tlb: Flush remote and local TLBs concurrently
...&per_cpu(steal_time, cpu); state = READ_ONCE(src->preempted); if ((state & KVM_VCPU_PREEMPTED)) { @@ -603,7 +606,7 @@ static void kvm_flush_tlb_others(const s } } - native_flush_tlb_others(flushmask, info); + native_flush_tlb_multi(flushmask, info); } static void __init kvm_guest_init(void) @@ -628,9 +631,8 @@ static void __init kvm_guest_init(void) if (kvm_para_has_feature(KVM_FEATURE_PV_TLB_FLUSH) && !kvm_para_has_hint(KVM_HINTS_REALTIME) && kvm_para_has_feature(KVM_FEATURE_STEAL_TIME)) { - pv_ops.mmu.flush_tlb_others = kvm_flush_tlb_others; + p...
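The kvm_flush_tlb_others() fragment above shows the PV TLB-flush trick this hook builds on: vCPUs the host has preempted are dropped from the IPI mask and flushed lazily by the hypervisor on re-entry. A simplified sketch, assuming the upstream steal_time layout, the KVM_VCPU_FLUSH_TLB flag, and a per-CPU scratch mask named __pv_cpu_mask:

    static void kvm_flush_tlb_multi(const struct cpumask *cpumask,
                                    const struct flush_tlb_info *info)
    {
            struct cpumask *flushmask = this_cpu_cpumask_var_ptr(__pv_cpu_mask);
            struct kvm_steal_time *src;
            u8 state;
            int cpu;

            cpumask_copy(flushmask, cpumask);
            for_each_cpu(cpu, flushmask) {
                    src = &per_cpu(steal_time, cpu);
                    state = READ_ONCE(src->preempted);
                    /* A preempted vCPU cannot touch stale TLB entries; ask
                     * the host to flush it on re-entry and skip the IPI. */
                    if ((state & KVM_VCPU_PREEMPTED) &&
                        try_cmpxchg(&src->preempted, &state,
                                    state | KVM_VCPU_FLUSH_TLB))
                            __cpumask_clear_cpu(cpu, flushmask);
            }

            native_flush_tlb_multi(flushmask, info);
    }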
2019 Dec 26
0
[PATCH 5/5] KVM: arm64: Support the vcpu preemption check
...> index 7b1c81b544bb..a2cd0183bbef 100644 >> --- a/arch/arm64/include/asm/paravirt.h >> +++ b/arch/arm64/include/asm/paravirt.h >> @@ -29,6 +29,8 @@ static inline u64 paravirt_steal_clock(int cpu) >> >> int __init pv_time_init(void); >> >> +int __init kvm_guest_init(void); >> + > > This is a *very* generic name - I suggest something like pv_lock_init() > so it's clear what the function actually does. > >> __visible bool __native_vcpu_is_preempted(int cpu); >> >> static inline bool pv_vcpu_is_preempted(int cpu) >...
2017 Nov 17
2
[PATCH RFC v3 3/6] sched/idle: Add a generic poll before enter real idle path
...: :"memory"); /* 2. halt */ ???????????? + ??? ???? +??????? /* 3. get the last idle state's residency time */ ???????????? + ??? ???? +??????? /* 4. update poll duration based on last idle state's residency time */ ???????????? +} ???????????? + ????????????? void __init kvm_guest_init(void) ????????????? { ???????????????????? int i; ???????????? @@ -490,6 +496,8 @@ void __init kvm_guest_init(void) ???????????????????? if (kvmclock_vsyscall) ???????????????????????????? kvm_setup_vsyscall_timeinfo(); ???????????? +?????? pv_irq_ops.safe_halt = kvm_safe_halt; ????????????...
2019 May 27
0
[RFC PATCH 5/6] x86/mm/tlb: Flush remote and local TLBs concurrently
...CE(src->preempted); > if ((state & KVM_VCPU_PREEMPTED)) { > @@ -603,7 +606,7 @@ static void kvm_flush_tlb_others(const s > } > } > > - native_flush_tlb_others(flushmask, info); > + native_flush_tlb_multi(flushmask, info); > } > > static void __init kvm_guest_init(void) > @@ -628,9 +631,8 @@ static void __init kvm_guest_init(void) > if (kvm_para_has_feature(KVM_FEATURE_PV_TLB_FLUSH) && > !kvm_para_has_hint(KVM_HINTS_REALTIME) && > kvm_para_has_feature(KVM_FEATURE_STEAL_TIME)) { > - pv_ops.mmu.flush_tlb_others = k...
2017 Nov 17
0
[PATCH RFC v3 3/6] sched/idle: Add a generic poll before enter real idle path
...alt */ > + > + /* 3. get the last idle state's residency time */ > + > + /* 4. update poll duration based on last idle state's residency time */ > +} > + > void __init kvm_guest_init(void) > { > int i; > @@ -490,6 +496,8 @@ void __init kvm_guest_init(void) > if (kvmclock_vsyscall) > kvm_setup_vsyscall_timeinfo(); > > + pv_irq_ops.safe_halt =...
2019 Dec 17
10
[PATCH 0/5] KVM: arm64: vcpu preempted check support
From: Zengruan Ye <yezengruan at huawei.com> This patch set aims to support the vcpu_is_preempted() functionality under KVM/arm64, which allows the guest to check whether a vcpu is currently running or not. This will enhance lock performance on overcommitted hosts (more runnable vcpus than physical cpus in the system), as doing busy waits for preempted vcpus will hurt system performance far
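The payoff is in spin loops: a waiter can stop spinning on a lock holder whose vCPU is not on a physical CPU. An illustrative caller (not code from this series):

    static void spin_on_owner(int owner_cpu, atomic_t *locked)
    {
            while (atomic_read(locked)) {
                    /* The holder's vCPU is preempted by the host: it cannot
                     * release the lock, so sleep instead of spinning. */
                    if (vcpu_is_preempted(owner_cpu))
                            break;
                    cpu_relax();
            }
    }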
2017 Nov 16
1
[PATCH RFC v3 3/6] sched/idle: Add a generic poll before enter real idle path
On 2017-11-16 06:03, Thomas Gleixner wrote: > On Wed, 15 Nov 2017, Peter Zijlstra wrote: > >> On Mon, Nov 13, 2017 at 06:06:02PM +0800, Quan Xu wrote: >>> From: Yang Zhang <yang.zhang.wz at gmail.com> >>> >>> Implement a generic idle poll which resembles the functionality >>> found in arch/. Provide weak arch_cpu_idle_poll function which
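For reference, the weak-symbol mechanism the quote describes lets the core idle loop ship a no-op default that any architecture may override; a minimal sketch (illustrative, not the patch itself):

    /* kernel/sched/idle.c: default, overridable definition */
    void __weak arch_cpu_idle_poll(void) { }

    /* an architecture overrides it with a real implementation, e.g.: */
    void arch_cpu_idle_poll(void)
    {
            /* spin briefly waiting for work before entering real idle */
    }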
2019 May 25
3
[RFC PATCH 5/6] x86/mm/tlb: Flush remote and local TLBs concurrently
...h_tlb_multi(mask, info) + #define flush_tlb_others(mask, info) \ native_flush_tlb_others(mask, info) diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c index 3f0cc828cc36..c1c2b88ea3f1 100644 --- a/arch/x86/kernel/kvm.c +++ b/arch/x86/kernel/kvm.c @@ -643,6 +643,7 @@ static void __init kvm_guest_init(void) kvm_para_has_feature(KVM_FEATURE_STEAL_TIME)) { pv_ops.mmu.flush_tlb_others = kvm_flush_tlb_others; pv_ops.mmu.tlb_remove_table = tlb_remove_table; + static_key_disable(&flush_tlb_multi_enabled.key); } if (kvm_para_has_feature(KVM_FEATURE_PV_EOI)) diff --git a/arch/x86/...
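The added static_key_disable() shows how the series keeps the old behaviour reachable: flush_tlb_multi_enabled presumably defaults to true, and KVM switches it off when it installs its own flush_tlb_others hook. A sketch of the consuming side, assuming a DEFINE_STATIC_KEY_TRUE declaration (names beyond the diff are assumptions):

    DEFINE_STATIC_KEY_TRUE(flush_tlb_multi_enabled);

    static void flush_tlb_on_cpus(const struct cpumask *mask,
                                  const struct flush_tlb_info *info)
    {
            if (static_branch_likely(&flush_tlb_multi_enabled))
                    flush_tlb_multi(mask, info);    /* concurrent, includes self */
            else
                    flush_tlb_others(mask, info);   /* legacy: remote CPUs only */
    }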
2016 Oct 19
3
[PATCH v4 5/5] x86, kvm: support vcpu preempted check
...u_is_preempted(int cpu) > +{ > + struct kvm_steal_time *src; > + > + src = &per_cpu(steal_time, cpu); > + > + return !!src->preempted; > +} > + > #ifdef CONFIG_SMP > static void __init kvm_smp_prepare_boot_cpu(void) > { > @@ -488,6 +497,8 @@ void __init kvm_guest_init(void) > kvm_guest_cpu_init(); > #endif > > + pv_vcpu_ops.vcpu_is_preempted = kvm_vcpu_is_preempted; Would be nicer to assign conditionally in the KVM_FEATURE_STEAL_TIME block. The steal_time structure has to be zeroed, so this code would work, but the native function (return fals...
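The reviewer's suggestion, sketched: install the callback only inside the KVM_FEATURE_STEAL_TIME branch, so guests without steal time keep the native callback that simply returns false (illustrative, reusing the pv_vcpu_ops naming from the quoted patch):

    void __init kvm_guest_init(void)
    {
            /* ... */
            if (kvm_para_has_feature(KVM_FEATURE_STEAL_TIME)) {
                    has_steal_clock = 1;
                    /* preempted is only ever written when steal time is
                     * enabled, so the PV callback only helps here */
                    pv_vcpu_ops.vcpu_is_preempted = kvm_vcpu_is_preempted;
            }
            /* ... */
    }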
2013 Jul 25
2
[PATCH V2 4/4] x86: correctly detect hypervisor
...t;VMware") && __vmware_platform()) - return true; + return 1; - return false; + return 0; } /* diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c index a96d32c..7817afd 100644 --- a/arch/x86/kernel/kvm.c +++ b/arch/x86/kernel/kvm.c @@ -498,11 +498,9 @@ void __init kvm_guest_init(void) #endif } -static bool __init kvm_detect(void) +static uint32_t __init kvm_detect(void) { - if (!kvm_para_available()) - return false; - return true; + return kvm_cpuid_base(); } const struct hypervisor_x86 x86_hyper_kvm __refconst = { diff --git a/arch/x86/xen/enlighten.c b/arch/x86...
2014 Mar 19
1
[PATCH v6 05/11] pvqspinlock, x86: Allow unfair spinlock in a PV guest
On 03/19/2014 06:07 AM, Paolo Bonzini wrote: > Il 19/03/2014 04:15, Waiman Long ha scritto: >>>> You should see the same values with the PV ticketlock. It is not clear >>>> to me if this testing did include that variant of locks? >>> >>> Yes, PV is fine. But up to this point of the series, we are concerned >>> about spinlock performance when