Displaying 11 results from an estimated 11 matches for "end_pv_lock_ops_queued_spin_unlock".
2016 Dec 08
0
[PATCH 1/2] x86,paravirt: Fix native_patch()
...>
---
arch/x86/kernel/paravirt_patch_32.c | 4 ++++
arch/x86/kernel/paravirt_patch_64.c | 4 ++++
2 files changed, 8 insertions(+)
--- a/arch/x86/kernel/paravirt_patch_32.c
+++ b/arch/x86/kernel/paravirt_patch_32.c
@@ -56,15 +56,19 @@ unsigned native_patch(u8 type, u16 clobb
end = end_pv_lock_ops_queued_spin_unlock;
goto patch_site;
}
+ goto patch_default;
+
case PARAVIRT_PATCH(pv_lock_ops.vcpu_is_preempted):
if (pv_is_native_vcpu_is_preempted()) {
start = start_pv_lock_ops_vcpu_is_preempted;
end = end_pv_lock_ops_vcpu_is_preempted;
goto patch_site;
}
+ goto patch_defa...
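[Context note, not part of the quoted patch: before this fix, a non-native queued_spin_unlock fell through into the newly added vcpu_is_preempted case instead of reaching the default patcher, so the wrong native sequence could be patched in. Below is a minimal, self-contained C model of that dispatch pattern and the fall-through hazard; all names are illustrative, not the kernel's.]

#include <stdbool.h>
#include <stdio.h>

/* Toy model of the native_patch() dispatch.  Each op either gets a native
 * replacement sequence (patch_site) or is left to the generic patcher
 * (patch_default).  Omitting the goto after a failed native check makes
 * control fall into the next case, which is the bug being fixed above. */
enum pv_op { QUEUED_SPIN_UNLOCK, VCPU_IS_PREEMPTED };

static bool spin_unlock_is_native = false;	/* e.g. a Xen PV guest */
static bool vcpu_preempted_is_native = true;

static const char *native_patch_model(enum pv_op type)
{
	switch (type) {
	case QUEUED_SPIN_UNLOCK:
		if (spin_unlock_is_native)
			return "patch site: native queued_spin_unlock";
		goto patch_default;	/* without this line we fall through */
	case VCPU_IS_PREEMPTED:
		if (vcpu_preempted_is_native)
			return "patch site: native vcpu_is_preempted";
		goto patch_default;
	}
patch_default:
	return "default patch: keep the indirect pv call";
}

int main(void)
{
	printf("%s\n", native_patch_model(QUEUED_SPIN_UNLOCK));
	printf("%s\n", native_patch_model(VCPU_IS_PREEMPTED));
	return 0;
}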
2016 Nov 15
2
[PATCH v7 06/11] x86, paravirt: Add interface to support kvm/xen vcpu preempted check
...patch_ident_64(void *i
}
extern bool pv_is_native_spin_unlock(void);
+extern bool pv_is_native_vcpu_is_preempted(void);
unsigned native_patch(u8 type, u16 clobbers, void *ibuf,
unsigned long addr, unsigned len)
@@ -54,6 +56,12 @@ unsigned native_patch(u8 type, u16 clobb
end = end_pv_lock_ops_queued_spin_unlock;
goto patch_site;
}
+ case PARAVIRT_PATCH(pv_lock_ops.vcpu_is_preempted):
+ if (pv_is_native_vcpu_is_preempted()) {
+ start = start_pv_lock_ops_vcpu_is_preempted;
+ end = end_pv_lock_ops_vcpu_is_preempted;
+ goto patch_site;
+ }
#endif
default:
--- a/arch/x86/kernel/pa...
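[Context note: this hunk teaches native_patch() to inline the native vcpu_is_preempted (which simply reports false on bare metal), mirroring the existing queued_spin_unlock handling. The point of the hook itself is to let lock waiters stop spinning once the lock holder's vCPU has been preempted by the host. A small self-contained C illustration of that waiter-side check follows; the names and the preemption flag are made up for the example.]

#include <stdbool.h>
#include <stdio.h>

/* Illustrative waiter loop: stop optimistic spinning once the lock holder's
 * vCPU is reported as preempted.  vcpu_is_preempted() here is a stand-in
 * for the pv hook; in the real series KVM/Xen back it with host state. */
static bool host_preempted_cpu1 = true;		/* pretend the host preempted vCPU 1 */

static bool vcpu_is_preempted(int cpu)
{
	return cpu == 1 && host_preempted_cpu1;	/* bare metal would always say false */
}

static bool spin_for_lock(int holder_cpu, int max_spins)
{
	for (int i = 0; i < max_spins; i++) {
		if (vcpu_is_preempted(holder_cpu))
			return false;		/* holder can't run; take the slow path */
		/* a real waiter would cpu_relax() and retest the lock here */
	}
	return true;				/* spun for the full budget */
}

int main(void)
{
	printf("spin on holder vCPU 1: %s\n",
	       spin_for_lock(1, 1000) ? "kept spinning" : "bailed out early");
	return 0;
}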
2016 Dec 08
3
[PATCH 0/2] Fix paravirt fail
Two patches that cure fallout from commit:
3cded4179481 ("x86/paravirt: Optimize native pv_lock_ops.vcpu_is_preempted()")
2016 Dec 14
1
[PATCH] arch: x86: kernel: fixed unused label issue
...pv_mmu_ops, write_cr3);
+ PATCH_SITE(pv_mmu_ops, flush_tlb_single);
+ PATCH_SITE(pv_cpu_ops, wbinvd);
#if defined(CONFIG_PARAVIRT_SPINLOCKS)
- case PARAVIRT_PATCH(pv_lock_ops.queued_spin_unlock):
- if (pv_is_native_spin_unlock()) {
- start = start_pv_lock_ops_queued_spin_unlock;
- end = end_pv_lock_ops_queued_spin_unlock;
- goto patch_site;
- }
- goto patch_default;
+ case PARAVIRT_PATCH(pv_lock_ops.queued_spin_unlock):
+ if (pv_is_native_spin_unlock()) {
+ start = start_pv_lock_ops_queued_spin_unlock;
+ end = end_pv_lock_ops_queued_spin_unlock;
+ goto patch_site;
+ }
+ goto patch_default;
- ca...
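[Context note: the unused-label warning arises because, with CONFIG_PARAVIRT_SPINLOCKS disabled, the goto patch_default statements are compiled out while the patch_default: label itself remains, so GCC reports a defined-but-unused label; this patch is one attempt at silencing that. A tiny illustrative reproduction, with CONFIG_PARAVIRT_SPINLOCKS deliberately left undefined:]

#include <stdio.h>

static const char *pick_patch(int is_native)
{
#ifdef CONFIG_PARAVIRT_SPINLOCKS
	if (is_native)
		return "native patch";
	goto patch_default;		/* the label's only user */
#endif
patch_default:				/* with the block above compiled out,
					 * -Wunused-label fires on this line */
	return "default patch";
}

int main(void)
{
	printf("%s\n", pick_patch(0));
	return 0;
}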
2016 Nov 16
0
[PATCH v7 06/11] x86, paravirt: Add interface to support kvm/xen vcpu preempted check
...extern bool pv_is_native_spin_unlock(void);
> +extern bool pv_is_native_vcpu_is_preempted(void);
>
> unsigned native_patch(u8 type, u16 clobbers, void *ibuf,
> unsigned long addr, unsigned len)
> @@ -54,6 +56,12 @@ unsigned native_patch(u8 type, u16 clobb
> end = end_pv_lock_ops_queued_spin_unlock;
> goto patch_site;
> }
> + case PARAVIRT_PATCH(pv_lock_ops.vcpu_is_preempted):
> + if (pv_is_native_vcpu_is_preempted()) {
> + start = start_pv_lock_ops_vcpu_is_preempted;
> + end = end_pv_lock_ops_vcpu_is_preempted;
> + goto patch_site;
> + }
>...
2016 Nov 02
13
[PATCH v7 00/11] implement vcpu preempted check
change from v6:
fix typos and remove unnecessary comments.
change from v5:
split x86/kvm patch into guest/host part.
introduce kvm_write_guest_offset_cached.
fix some typos.
rebase patch onto 4.9.2
change from v4:
split x86 kvm vcpu preempted check into two patches.
add documentation patch.
add x86 vcpu preempted check patch under xen
add s390 vcpu preempted check patch
change from v3:
2018 Aug 13
11
[PATCH v2 00/11] x86/paravirt: several cleanups
This series removes some no longer needed stuff from paravirt
infrastructure and puts large quantities of paravirt ops under a new
config option PARAVIRT_XXL which is selected by XEN_PV only.
A pvops kernel without XEN_PV being configured is about 2.5% smaller
with this series applied.
tip commit 5800dc5c19f34e6e03b5adab1282535cb102fafd ("x86/paravirt:
Fix spectre-v2 mitigations for