search for: kvm_vcpu_is_preempted

Displaying 12 results from an estimated 19 matches for "kvm_vcpu_is_preempted".

2016 Oct 19
3
[PATCH v4 5/5] x86, kvm: support vcpu preempted check
...d;

Why __u32 instead of __u8?

> +	__u32 pad[11];
> };

Please document the change in Documentation/virtual/kvm/msr.txt, section
MSR_KVM_STEAL_TIME.

> diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
> @@ -415,6 +415,15 @@ void kvm_disable_steal_time(void)
> +static bool kvm_vcpu_is_preempted(int cpu)
> +{
> +	struct kvm_steal_time *src;
> +
> +	src = &per_cpu(steal_time, cpu);
> +
> +	return !!src->preempted;
> +}
> +
>  #ifdef CONFIG_SMP
>  static void __init kvm_smp_prepare_boot_cpu(void)
>  {
> @@ -488,6 +497,8 @@ void __init kvm_guest_init...
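For context on the snippet above: the guest-side helper simply reads the preempted byte that the host writes into the shared steal-time area. A minimal self-contained sketch of that idea, with a simplified layout standing in for the real struct kvm_steal_time and a fixed array standing in for per_cpu():

#include <stdbool.h>
#include <stdint.h>

#define SKETCH_NR_CPUS 64

/* Simplified stand-in for struct kvm_steal_time: the host sets
 * 'preempted' to non-zero while the vcpu is scheduled out. */
struct steal_time_sketch {
	uint64_t steal;
	uint32_t version;
	uint32_t flags;
	uint8_t  preempted;
	uint8_t  pad[47];
};

/* One shared area per vcpu; the real kernel registers each area
 * with the host through MSR_KVM_STEAL_TIME. */
static struct steal_time_sketch steal_time[SKETCH_NR_CPUS];

static bool vcpu_is_preempted_sketch(int cpu)
{
	/* The flag is advisory and may be stale; a plain read suffices. */
	return steal_time[cpu].preempted != 0;
}

Note the check is heuristic by design: the flag can change between the read and any decision taken on it, which is acceptable because it only steers spin-versus-yield policy.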
2016 Nov 15
2
[PATCH v7 06/11] x86, paravirt: Add interface to support kvm/xen vcpu preempted check
...ne bool vcpu_is_preempted(int cpu)
-{
-	return pv_lock_ops.vcpu_is_preempted(cpu);
-}
-#endif
-
 #include <asm/qspinlock.h>

 /*
--- a/arch/x86/kernel/kvm.c
+++ b/arch/x86/kernel/kvm.c
@@ -415,15 +415,6 @@ void kvm_disable_steal_time(void)
 	wrmsr(MSR_KVM_STEAL_TIME, 0, 0);
 }

-static bool kvm_vcpu_is_preempted(int cpu)
-{
-	struct kvm_steal_time *src;
-
-	src = &per_cpu(steal_time, cpu);
-
-	return !!src->preempted;
-}
-
 #ifdef CONFIG_SMP
 static void __init kvm_smp_prepare_boot_cpu(void)
 {
@@ -480,9 +471,6 @@ void __init kvm_guest_init(void)
 	if (kvm_para_has_feature(KVM_FEATURE_STEAL_TIME)) {...
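The point of this patch is indirection: kvm_vcpu_is_preempted() stops being a hard-wired function and becomes a callback behind a paravirt ops table, so KVM and Xen can each install their own implementation while bare metal keeps a trivial default. A hypothetical sketch of that pattern (names are illustrative, not the kernel's actual pv_lock_ops definition):

#include <stdbool.h>

/* Ops table: each hypervisor backend (kvm, xen) installs its own
 * callback at boot; bare metal keeps the default. */
struct pv_lock_ops_sketch {
	bool (*vcpu_is_preempted)(int cpu);
};

static bool native_vcpu_is_preempted(int cpu)
{
	(void)cpu;
	return false;	/* bare metal: a cpu is never preempted by a host */
}

static struct pv_lock_ops_sketch pv_lock_ops_sketch = {
	.vcpu_is_preempted = native_vcpu_is_preempted,
};

/* Generic entry point used by lock slowpaths, regardless of backend. */
static bool vcpu_is_preempted_sketch(int cpu)
{
	return pv_lock_ops_sketch.vcpu_is_preempted(cpu);
}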
2016 Oct 20
0
[PATCH v5 6/9] x86, kvm: support vcpu preempted check
...11];
 };

 #define KVM_STEAL_ALIGNMENT_BITS 5

diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
index edbbfc8..0b48dd2 100644
--- a/arch/x86/kernel/kvm.c
+++ b/arch/x86/kernel/kvm.c
@@ -415,6 +415,15 @@ void kvm_disable_steal_time(void)
 	wrmsr(MSR_KVM_STEAL_TIME, 0, 0);
 }

+static bool kvm_vcpu_is_preempted(int cpu)
+{
+	struct kvm_steal_time *src;
+
+	src = &per_cpu(steal_time, cpu);
+
+	return !!src->preempted;
+}
+
 #ifdef CONFIG_SMP
 static void __init kvm_smp_prepare_boot_cpu(void)
 {
@@ -471,6 +480,9 @@ void __init kvm_guest_init(void)
 	if (kvm_para_has_feature(KVM_FEATURE_STEAL_TIME)) {...
2019 Dec 26
0
[PATCH 5/5] KVM: arm64: Support the vcpu preemption check
...f (!reg)
>> +		return -EFAULT;
>> +
>> +	/* Pass the memory address to host via hypercall */
>> +	arm_smccc_1_1_invoke(ARM_SMCCC_HV_PV_LOCK_PREEMPTED,
>> +			     virt_to_phys(reg), &res);
>> +
>> +	return 0;
>> +}
>> +
>> +static bool kvm_vcpu_is_preempted(int cpu)
>> +{
>> +	struct pvlock_vcpu_state *reg = &per_cpu(pvlock_vcpu_region, cpu);
>> +
>> +	if (reg)
>> +		return !!(reg->preempted & 1);
>> +
>> +	return false;
>> +}
>> +
>> +static int kvm_arm_init_pvlock(void)
>>...
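The arm64 variant has the same shape as x86, but the guest must first tell the host where the shared per-vcpu state lives, via an SMCCC hypercall, before checking bit 0 of the preempted field. A simplified sketch of that handshake (struct and function names here are stand-ins; the real transport is arm_smccc_1_1_invoke() as quoted above):

#include <stdbool.h>
#include <stdint.h>

#define SKETCH_NR_CPUS 8

/* Per-vcpu state the host updates; bit 0 of 'preempted' set means
 * "this vcpu is currently scheduled out". */
struct pvlock_state_sketch {
	uint64_t preempted;
};

static struct pvlock_state_sketch pvlock_region[SKETCH_NR_CPUS];

/* Stand-in for the hypercall in the patch: the guest passes the
 * physical address of its per-cpu state so the host knows where
 * to write preempted/unpreempted transitions. */
static int share_region_with_host(int cpu)
{
	(void)&pvlock_region[cpu];	/* would be virt_to_phys() + SMCCC */
	return 0;
}

static bool vcpu_is_preempted_sketch(int cpu)
{
	/* Only bit 0 is meaningful; the remaining bits are reserved. */
	return (pvlock_region[cpu].preempted & 1) != 0;
}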
2016 Oct 19
0
[PATCH v4 5/5] x86, kvm: support vcpu preempted check
...11];
 };

 #define KVM_STEAL_ALIGNMENT_BITS 5

diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
index edbbfc8..0011bef 100644
--- a/arch/x86/kernel/kvm.c
+++ b/arch/x86/kernel/kvm.c
@@ -415,6 +415,15 @@ void kvm_disable_steal_time(void)
 	wrmsr(MSR_KVM_STEAL_TIME, 0, 0);
 }

+static bool kvm_vcpu_is_preempted(int cpu)
+{
+	struct kvm_steal_time *src;
+
+	src = &per_cpu(steal_time, cpu);
+
+	return !!src->preempted;
+}
+
 #ifdef CONFIG_SMP
 static void __init kvm_smp_prepare_boot_cpu(void)
 {
@@ -488,6 +497,8 @@ void __init kvm_guest_init(void)
 	kvm_guest_cpu_init();
 #endif

+	pv_vcpu_ops.vcpu_...
2016 Nov 16
0
[PATCH v7 06/11] x86, paravirt: Add interface to support kvm/xen vcpu preempted check
...pu_is_preempted(cpu);
> -}
> -#endif
> -
>  #include <asm/qspinlock.h>
>
>  /*
> --- a/arch/x86/kernel/kvm.c
> +++ b/arch/x86/kernel/kvm.c
> @@ -415,15 +415,6 @@ void kvm_disable_steal_time(void)
>  	wrmsr(MSR_KVM_STEAL_TIME, 0, 0);
>  }
>
> -static bool kvm_vcpu_is_preempted(int cpu)
> -{
> -	struct kvm_steal_time *src;
> -
> -	src = &per_cpu(steal_time, cpu);
> -
> -	return !!src->preempted;
> -}
> -
>  #ifdef CONFIG_SMP
>  static void __init kvm_smp_prepare_boot_cpu(void)
>  {
> @@ -480,9 +471,6 @@ void __init kvm_guest_init...
2016 Oct 19
10
[PATCH v4 0/5] implement vcpu preempted check
change from v3:
  add x86 vcpu preempted check patch
change from v2:
  no code change; fix typos, update some comments
change from v1:
  a simpler definition of the default vcpu_is_preempted
  skip machine type check on ppc, and add config
  remove dedicated macro
  add one patch to drop the overload of rwsem_spin_on_owner and mutex_spin_on_owner
  add more comments, thanks to Boqun and Peter's suggestions
2019 Dec 17
10
[PATCH 0/5] KVM: arm64: vcpu preempted check support
From: Zengruan Ye <yezengruan at huawei.com> This patch set aims to support the vcpu_is_preempted() functionality under KVM/arm64, which allows the guest to determine whether a vcpu is currently running or not. This will enhance lock performance on overcommitted hosts (more runnable vcpus than physical cpus in the system), as busy-waiting on preempted vcpus hurts system performance far worse than early yielding.
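The payoff described here shows up in lock slowpaths: a waiter should only spin while the lock holder's vcpu can make progress. A toy illustration of that policy, with stubbed-out hooks so it stands alone (none of these helper names are from the series itself):

#include <stdbool.h>

/* Hypothetical hooks: vcpu_is_preempted() is whatever backend the
 * platform wired up; the rest are stubs to keep the sketch
 * self-contained. */
static bool vcpu_is_preempted(int cpu) { (void)cpu; return false; }
static int  lock_owner_cpu(void)       { return 0; }
static bool try_acquire(void)          { return true; }
static void yield_to_host(void)        { }

/* Toy slowpath: spin only while the lock holder's vcpu can actually
 * make progress; once it is preempted, yield instead of burning
 * the physical cpu. */
static void lock_slowpath_sketch(void)
{
	while (!try_acquire()) {
		if (vcpu_is_preempted(lock_owner_cpu()))
			yield_to_host();
		/* else: a cpu_relax()-style pause, then retry */
	}
}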
2016 Oct 20
15
[PATCH v5 0/9] implement vcpu preempted check
change from v4:
  split the x86 kvm vcpu preempted check into two patches
  add documentation patch
  add x86 vcpu preempted check patch under xen
  add s390 vcpu preempted check patch
change from v3:
  add x86 vcpu preempted check patch
change from v2:
  no code change; fix typos, update some comments
change from v1:
  a simpler definition of the default vcpu_is_preempted
  skip machine type check on ppc,...
2016 Nov 02
13
[PATCH v7 00/11] implement vcpu preempted check
change from v6:
  fix typos and remove unnecessary comments
change from v5:
  split the x86/kvm patch into guest/host parts
  introduce kvm_write_guest_offset_cached
  fix some typos
  rebase patches onto 4.9.2
change from v4:
  split the x86 kvm vcpu preempted check into two patches
  add documentation patch
  add x86 vcpu preempted check patch under xen
  add s390 vcpu preempted check patch
change from v3:...
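The guest/host split mentioned in the v5 changes means the host is the one setting the flag: when its scheduler takes the physical cpu away from a vcpu, it updates just the preempted byte inside the guest's registered steal-time area, which is the kind of partial update kvm_write_guest_offset_cached makes convenient. A rough host-side sketch under simplified types (the write primitive below is a stand-in, not the real KVM helper):

#include <stdint.h>
#include <stddef.h>
#include <string.h>

/* Simplified guest steal-time layout; only 'preempted' matters here. */
struct steal_time_area_sketch {
	uint64_t steal;
	uint8_t  preempted;
	uint8_t  pad[39];
};

/* Stand-in for kvm_write_guest_offset_cached(): update only 'len'
 * bytes at 'offset' inside the guest's registered area, instead of
 * rewriting the whole struct. */
static void write_guest_offset(struct steal_time_area_sketch *area,
			       size_t offset, const void *data, size_t len)
{
	memcpy((unsigned char *)area + offset, data, len);
}

/* Host scheduler path: mark the vcpu preempted as it loses its pcpu. */
static void mark_vcpu_preempted(struct steal_time_area_sketch *area)
{
	uint8_t preempted = 1;

	write_guest_offset(area,
			   offsetof(struct steal_time_area_sketch, preempted),
			   &preempted, sizeof(preempted));
}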
2019 Dec 26
7
[PATCH v2 0/6] KVM: arm64: VCPU preempted check support
This patch set aims to support the vcpu_is_preempted() functionality under KVM/arm64, which allows the guest to determine whether a VCPU is currently running or not. This will enhance lock performance on overcommitted hosts (more runnable VCPUs than physical CPUs in the system), as busy-waiting on preempted VCPUs hurts system performance far worse than early yielding. We have observed some...
2016 Oct 28
16
[PATCH v6 00/11] implement vcpu preempted check
change from v5:
  split the x86/kvm patch into guest/host parts
  introduce kvm_write_guest_offset_cached
  fix some typos
  rebase patches onto 4.9.2
change from v4:
  split the x86 kvm vcpu preempted check into two patches
  add documentation patch
  add x86 vcpu preempted check patch under xen
  add s390 vcpu preempted check patch
change from v3:
  add x86 vcpu preempted check patch
change from v2:
  no code...