search for: x86_feature_hypervisor

Displaying 20 results from an estimated 99 matches for "x86_feature_hypervisor".

2016 Mar 29  1  [PATCH 02/10] x86/cpufeature: Kill cpu_has_hypervisor

.../arch/x86/events/intel/cstate.c b/arch/x86/events/intel/cstate.c
index 7946c4231169..d5045c8e2e63 100644
--- a/arch/x86/events/intel/cstate.c
+++ b/arch/x86/events/intel/cstate.c
@@ -677,7 +677,7 @@ static int __init cstate_pmu_init(void)
 {
 	int err;

-	if (cpu_has_hypervisor)
+	if (boot_cpu_has(X86_FEATURE_HYPERVISOR))
 		return -ENODEV;

 	err = cstate_init();
diff --git a/arch/x86/events/intel/uncore.c b/arch/x86/events/intel/uncore.c
index 7012d18bb293..3f6d8b5672d5 100644
--- a/arch/x86/events/intel/uncore.c
+++ b/arch/x86/events/intel/uncore.c
@@ -1383,7 +1383,7 @@ static int __init intel_uncore_init(void...
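
For context, X86_FEATURE_HYPERVISOR is simply CPUID leaf 1, ECX bit 31: Intel keeps it reserved-zero on real hardware, and hypervisors set it to announce themselves. A minimal userspace sketch of the same test, using GCC's <cpuid.h> (illustration only, not part of the patch):

#include <stdio.h>
#include <cpuid.h>

int main(void)
{
	unsigned int eax, ebx, ecx, edx;

	/* CPUID leaf 1; fails only on CPUs too old to have that leaf. */
	if (!__get_cpuid(1, &eax, &ebx, &ecx, &edx))
		return 1;

	/* ECX bit 31 is reserved-zero on bare metal; hypervisors set it. */
	printf("hypervisor bit: %s\n", (ecx & (1u << 31)) ? "set" : "clear");
	return 0;
}
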
2014 Mar 20  3  [PATCH v7 06/11] pvqspinlock, x86: Allow unfair queue spinlock in a KVM guest

...w_inc(&paravirt_unfairlocks_enabled);
> +	printk(KERN_INFO "KVM setup unfair spinlock\n");
> +
> +	return 0;
> +}
> +early_initcall(kvm_unfair_locks_init_jump);
> +#endif
>

No! Please do what I asked you to do. You are not handling Hyper-V or
VMWare. Just use X86_FEATURE_HYPERVISOR and it will cover all
hypervisors that actually follow Intel's guidelines.

Paolo
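
The change Paolo is asking for would gate the initcall on the generic hypervisor bit instead of KVM-specific detection, so the same code covers Hyper-V, VMware, Xen HVM and anything else that sets the flag. A hedged sketch of that shape, reusing the identifiers visible in the quoted hunk (the surrounding code is assumed, not quoted from the thread):

static int __init kvm_unfair_locks_init_jump(void)
{
	/* Generic: true under any hypervisor that follows Intel's guidelines. */
	if (!boot_cpu_has(X86_FEATURE_HYPERVISOR))
		return 0;

	static_key_slow_inc(&paravirt_unfairlocks_enabled);
	printk(KERN_INFO "Enabling unfair spinlock\n");
	return 0;
}
early_initcall(kvm_unfair_locks_init_jump);
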
2017 Nov 01  2  [PATCH] x86/paravirt: Add kernel parameter to choose paravirt lock type

...here are 3 options:
+ * 1) queued - the native queued spinlock
+ * 2) pv - the paravirt queued spinlock (if CONFIG_PARAVIRT_SPINLOCKS)
+ * 3) unfair - the simple TATAS unfair lock
+ *
+ * If this argument is not specified, the kernel will automatically choose
+ * an appropriate one depending on X86_FEATURE_HYPERVISOR and hypervisor
+ * specific settings.
+ */
+enum pv_spinlock_type __read_mostly pv_spinlock_type = locktype_auto;
+
+static int __init pvlock_setup(char *s)
+{
+	if (!s)
+		return -EINVAL;
+
+	if (!strcmp(s, "queued"))
+		pv_spinlock_type = locktype_queued;
+	else if (!strcmp(s, "pv"...
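
The hunk is cut off mid-parser, but the usual shape of such a handler finishes the strcmp chain and registers itself with early_param(). A sketch of that shape; the enum names beyond locktype_queued and the parameter name "pvlock_type" are assumptions for illustration, not quotes from the posted patch:

static int __init pvlock_setup(char *s)
{
	if (!s)
		return -EINVAL;

	if (!strcmp(s, "queued"))
		pv_spinlock_type = locktype_queued;
	else if (!strcmp(s, "pv"))
		pv_spinlock_type = locktype_paravirt;	/* enum name assumed */
	else if (!strcmp(s, "unfair"))
		pv_spinlock_type = locktype_unfair;	/* enum name assumed */
	else
		return -EINVAL;

	return 0;
}
early_param("pvlock_type", pvlock_setup);	/* parameter name assumed */
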
2017 Sep 06  2  [PATCH v2 1/2] paravirt/locks: use new static key for controlling call of virt_spin_lock()

..._spin_lock_key);
> +
> +void native_pv_lock_init(void) __init;
> +
>  #define virt_spin_lock virt_spin_lock
>  static inline bool virt_spin_lock(struct qspinlock *lock)
>  {
> +	if (!static_branch_likely(&virt_spin_lock_key))
> +		return false;
>  	if (!static_cpu_has(X86_FEATURE_HYPERVISOR))
>  		return false;
>
> @@ -65,6 +72,10 @@ static inline bool virt_spin_lock(struct qspinlock *lock)
>
>  	return true;
>  }
> +#else
> +static inline void native_pv_lock_init(void)
> +{
> +}
>  #endif /* CONFIG_PARAVIRT */
>
>  #include <asm-generic...
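
The other half of the mechanism, for reference: the key defaults to true and bare metal disables it once during boot, after which static_branch_likely() compiles down to a patched no-op branch. A minimal sketch, roughly matching what was eventually merged upstream:

/* e.g. in arch/x86/kernel/paravirt-spinlocks.c */
DEFINE_STATIC_KEY_TRUE(virt_spin_lock_key);

void __init native_pv_lock_init(void)
{
	/* On bare metal, turn the virt_spin_lock() path off for good. */
	if (!boot_cpu_has(X86_FEATURE_HYPERVISOR))
		static_branch_disable(&virt_spin_lock_key);
}
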
2008 Nov 19  0  [PATCH] support CPUID hypervisor feature bit

...;
@@ -1900,11 +1902,12 @@ void domain_cpuid(
             *ebx = cpuid->ebx;
             *ecx = cpuid->ecx;
             *edx = cpuid->edx;
-            return;
+            break;
         }
     }

-    *eax = *ebx = *ecx = *edx = 0;
+    if ( input == 1 )
+        *ecx |= 1U << (X86_FEATURE_HYPERVISOR % 32);
 }

 void vcpu_kick(struct vcpu *v)
Index: 2008-10-27/xen/arch/x86/traps.c
===================================================================
--- 2008-10-27.orig/xen/arch/x86/traps.c	2008-10-27 11:14:44.000000000 +0100
+++ 2008-10-27/xen/arch/x86/traps.c	2008-11-19 10:16:27.000000000 +0100...
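
The `% 32` works because these feature constants encode word*32 + bit: X86_FEATURE_HYPERVISOR sits in the leaf 1 ECX feature word at bit 31, so the modulo recovers the in-register bit position. A standalone illustration (the 4*32+31 value is the conventional encoding, stated here as an assumption about this tree):

#include <stdio.h>

#define X86_FEATURE_HYPERVISOR (4 * 32 + 31)	/* word 4 = CPUID.1:ECX, bit 31 (assumed encoding) */

int main(void)
{
	printf("feature word: %d\n", X86_FEATURE_HYPERVISOR / 32);	/* 4 */
	printf("register mask: 0x%08x\n",
	       1U << (X86_FEATURE_HYPERVISOR % 32));			/* 0x80000000 */
	return 0;
}
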
2017 Sep 05  3  [PATCH 3/4] paravirt: add virt_spin_lock pvops function

...;
>>> @@ -17,6 +17,25 @@ static inline void native_queued_spin_unlock(struct qspinlock *lock)
>>>  	smp_store_release((u8 *)lock, 0);
>>>  }
>>>
>>> +static inline bool native_virt_spin_lock(struct qspinlock *lock)
>>> +{
>>> +	if (!static_cpu_has(X86_FEATURE_HYPERVISOR))
>>> +		return false;
>>> +
>>> +	/*
>>> +	 * On hypervisors without PARAVIRT_SPINLOCKS support we fall
>>> +	 * back to a Test-and-Set spinlock, because fair locks have
>>> +	 * horrible lock 'holder' preemption issues.
>>> +...
2017 Sep 05  2  [PATCH 3/4] paravirt: add virt_spin_lock pvops function

...> +++ b/arch/x86/include/asm/qspinlock.h
> @@ -17,6 +17,25 @@ static inline void native_queued_spin_unlock(struct qspinlock *lock)
>  	smp_store_release((u8 *)lock, 0);
>  }
>
> +static inline bool native_virt_spin_lock(struct qspinlock *lock)
> +{
> +	if (!static_cpu_has(X86_FEATURE_HYPERVISOR))
> +		return false;
> +
> +	/*
> +	 * On hypervisors without PARAVIRT_SPINLOCKS support we fall
> +	 * back to a Test-and-Set spinlock, because fair locks have
> +	 * horrible lock 'holder' preemption issues.
> +	 */
> +
> +	do {
> +		while (atomic_read(&l...
2017 Sep 05  2  [PATCH 3/4] paravirt: add virt_spin_lock pvops function

...> +++ b/arch/x86/include/asm/qspinlock.h
> @@ -17,6 +17,25 @@ static inline void native_queued_spin_unlock(struct qspinlock *lock)
>  	smp_store_release((u8 *)lock, 0);
>  }
>
> +static inline bool native_virt_spin_lock(struct qspinlock *lock)
> +{
> +	if (!static_cpu_has(X86_FEATURE_HYPERVISOR))
> +		return false;
> +

I think you can take the above if statement out, as you have already done
that test in native_pv_lock_init(), so it will also be false here.

As this patch series is x86 specific, you should probably add "x86/" in
front of "paravirt" in the patch titles.

Cheers,
Long...
2017 Sep 05  0  [PATCH 3/4] paravirt: add virt_spin_lock pvops function

...4
--- a/arch/x86/include/asm/qspinlock.h
+++ b/arch/x86/include/asm/qspinlock.h
@@ -17,6 +17,25 @@ static inline void native_queued_spin_unlock(struct qspinlock *lock)
 	smp_store_release((u8 *)lock, 0);
 }

+static inline bool native_virt_spin_lock(struct qspinlock *lock)
+{
+	if (!static_cpu_has(X86_FEATURE_HYPERVISOR))
+		return false;
+
+	/*
+	 * On hypervisors without PARAVIRT_SPINLOCKS support we fall
+	 * back to a Test-and-Set spinlock, because fair locks have
+	 * horrible lock 'holder' preemption issues.
+	 */
+
+	do {
+		while (atomic_read(&lock->val) != 0)
+			cpu_relax();
+	} while (ato...
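
The excerpts of this hunk all cut off inside the same loop; the idiom being quoted is the classic test-and-test-and-set: spin read-only until the lock looks free, then attempt a single atomic claim. A self-contained userspace C11 sketch of that pattern (an illustration of the idiom, not the kernel code):

#include <stdatomic.h>

struct tas_lock { atomic_int val; };

static void tas_lock_acquire(struct tas_lock *lock)
{
	int expected;

	do {
		/* Read-only spin: avoids cache-line ping-pong while held. */
		while (atomic_load_explicit(&lock->val, memory_order_relaxed))
			;	/* a cpu_relax()/pause hint would go here */
		expected = 0;
	} while (!atomic_compare_exchange_weak_explicit(&lock->val, &expected, 1,
							memory_order_acquire,
							memory_order_relaxed));
}

static void tas_lock_release(struct tas_lock *lock)
{
	atomic_store_explicit(&lock->val, 0, memory_order_release);
}
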
2014 Apr 02  2  [PATCH v8 00/10] qspinlock: a 4-byte queue spinlock with PV support

...erver
>> due to long header. There is no change in content.
>>
>> v7->v8:
>>  - Remove one unneeded atomic operation from the slowpath, thus
>>    improving performance.
>>  - Simplify some of the code and add more comments.
>>  - Test for the X86_FEATURE_HYPERVISOR CPU feature bit to enable/disable
>>    the unfair lock.
>>  - Reduce unfair lock slowpath lock stealing frequency depending
>>    on its distance from the queue head.
>>  - Add performance data for IvyBridge-EX CPU.
>
> FYI, your v7 patch with 32 VCPUs (on a 32 cpu soc...
2017 Nov 01  0  [PATCH] x86/paravirt: Add kernel parameter to choose paravirt lock type

...) queued - the native queued spinlock
> + * 2) pv - the paravirt queued spinlock (if CONFIG_PARAVIRT_SPINLOCKS)
> + * 3) unfair - the simple TATAS unfair lock
> + *
> + * If this argument is not specified, the kernel will automatically choose
> + * an appropriate one depending on X86_FEATURE_HYPERVISOR and hypervisor
> + * specific settings.
> + */
> +enum pv_spinlock_type __read_mostly pv_spinlock_type = locktype_auto;
> +
> +static int __init pvlock_setup(char *s)
> +{
> +	if (!s)
> +		return -EINVAL;
> +
> +	if (!strcmp(s, "queued"))
> +		pv_spinlock_...
2017 Sep 05  7  [PATCH 0/4] make virt_spin_lock() a pvops function

With virt_spin_lock() being a pvops function, the bare-metal case can be
optimized by patching the call away completely. When running as a guest,
the kernel can decide whether to use paravirtualized spinlocks, the
current fallback to the unfair test-and-set scheme, or to mimic the
bare-metal behavior.

Juergen Gross (4):
  paravirt: add generic _paravirt_false() function
  paravirt: switch
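
The mechanism the cover letter alludes to is a paravirt op slot that bare metal points at a constant-false stub (the series' _paravirt_false()), which the patching machinery then eliminates at the call site. A much-simplified sketch of the dispatch shape, using a plain function pointer where the kernel uses runtime binary patching (all names below except _paravirt_false are hypothetical):

#include <stdbool.h>

struct qspinlock;	/* opaque for this sketch */

/* Bare-metal stub; the real kernel patches calls to it away entirely. */
static bool _paravirt_false(struct qspinlock *lock)
{
	(void)lock;
	return false;
}

static bool tas_virt_spin_lock(struct qspinlock *lock)
{
	(void)lock;
	/* ...test-and-set fallback as in the hunks above... */
	return true;
}

/* The "pvops" slot, chosen once during early boot. */
static bool (*virt_spin_lock_op)(struct qspinlock *) = _paravirt_false;

static void pv_lock_init(bool running_as_guest)	/* hypothetical helper */
{
	if (running_as_guest)
		virt_spin_lock_op = tas_virt_spin_lock;
}
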