Displaying 20 results from an estimated 48 matches for "paravirt_patch_ident_64".
2015 Apr 30
0
[PATCH 4/6] x86: introduce new pvops function spin_unlock
..._ticket_t head);
+ void (*unlock)(arch_spinlock_t *lock);
};
/* This contains all the paravirt structures: we get a convenient
@@ -398,6 +399,7 @@ extern struct pv_lock_ops pv_lock_ops;
unsigned paravirt_patch_nop(void);
unsigned paravirt_patch_ident_32(void *insnbuf, unsigned len);
unsigned paravirt_patch_ident_64(void *insnbuf, unsigned len);
+unsigned paravirt_patch_unlock(void *insnbuf, unsigned len);
unsigned paravirt_patch_ignore(unsigned len);
unsigned paravirt_patch_call(void *insnbuf,
const void *target, u16 tgt_clobbers,
@@ -620,6 +622,12 @@ int paravirt_disable_iospace(void);
__PVOP_CA...
2018 Aug 13
0
[PATCH v2 02/11] x86/paravirt: remove clobbers parameter from paravirt patch functions
...ned (*patch)(u8 type, u16 clobber, void *insnbuf,
+ unsigned (*patch)(u8 type, void *insnbuf,
unsigned long addr, unsigned len);
} __no_randomize_layout;
@@ -370,14 +370,13 @@ extern struct pv_lock_ops pv_lock_ops;
unsigned paravirt_patch_ident_32(void *insnbuf, unsigned len);
unsigned paravirt_patch_ident_64(void *insnbuf, unsigned len);
-unsigned paravirt_patch_default(u8 type, u16 clobbers, void *insnbuf,
+unsigned paravirt_patch_default(u8 type, void *insnbuf,
unsigned long addr, unsigned len);
unsigned paravirt_patch_insns(void *insnbuf, unsigned len,
const char *start, const char...
2016 Nov 15
2
[PATCH v7 06/11] x86, paravirt: Add interface to support kvm/xen vcpu preempted check
..."mov %c
#if defined(CONFIG_PARAVIRT_SPINLOCKS)
DEF_NATIVE(pv_lock_ops, queued_spin_unlock, "movb $0, (%eax)");
+DEF_NATIVE(pv_lock_ops, vcpu_is_preempted, "movl $0, %eax");
#endif
unsigned paravirt_patch_ident_32(void *insnbuf, unsigned len)
@@ -26,6 +27,7 @@ unsigned paravirt_patch_ident_64(void *i
}
extern bool pv_is_native_spin_unlock(void);
+extern bool pv_is_native_vcpu_is_preempted(void);
unsigned native_patch(u8 type, u16 clobbers, void *ibuf,
unsigned long addr, unsigned len)
@@ -54,6 +56,12 @@ unsigned native_patch(u8 type, u16 clobb
end = end_pv_lock_op...
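The one-liner being added, "movl $0, %eax", is the entire native implementation: on bare metal a vCPU is never preempted by a hypervisor, so the op is a constant false that the patcher can inline at every call site. In C terms (a sketch, not the kernel's exact definition):

#include <stdbool.h>

/* Native answer to "is this vCPU preempted?": never, on bare metal.
 * The patcher inlines the equivalent "movl $0, %eax" over the call. */
static inline bool native_vcpu_is_preempted(long cpu)
{
	(void)cpu;		/* unused: the answer is unconditional */
	return false;
}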
2015 Nov 03
1
[RFC PATCH] x86/paravirt: Kill some unused patching functions
...v_lock_ops;
__visible extern const char start_##ops##_##name[], end_##ops##_##name[]; \
asm(NATIVE_LABEL("start_", ops, name) code NATIVE_LABEL("end_", ops, name))
-unsigned paravirt_patch_nop(void);
unsigned paravirt_patch_ident_32(void *insnbuf, unsigned len);
unsigned paravirt_patch_ident_64(void *insnbuf, unsigned len);
-unsigned paravirt_patch_ignore(unsigned len);
unsigned paravirt_patch_call(void *insnbuf,
const void *target, u16 tgt_clobbers,
unsigned long addr, u16 site_clobbers,
diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
index c2130ae...
2016 Nov 16
0
[PATCH v7 06/11] x86, paravirt: Add interface to support kvm/xen vcpu preempted check
...NFIG_PARAVIRT_SPINLOCKS)
> DEF_NATIVE(pv_lock_ops, queued_spin_unlock, "movb $0, (%eax)");
> +DEF_NATIVE(pv_lock_ops, vcpu_is_preempted, "movl $0, %eax");
> #endif
>
> unsigned paravirt_patch_ident_32(void *insnbuf, unsigned len)
> @@ -26,6 +27,7 @@ unsigned paravirt_patch_ident_64(void *i
> }
>
> extern bool pv_is_native_spin_unlock(void);
> +extern bool pv_is_native_vcpu_is_preempted(void);
>
> unsigned native_patch(u8 type, u16 clobbers, void *ibuf,
> unsigned long addr, unsigned len)
> @@ -54,6 +56,12 @@ unsigned native_patch(u8 type, u...
2017 Sep 05
7
[PATCH 0/4] make virt_spin_lock() a pvops function
With virt_spin_lock() being a pvops function, the bare metal case can be
optimized by patching the call away completely. In case the kernel is running
as a guest, it can decide whether to use paravirtualized spinlocks, the
current fallback to the unfair test-and-set scheme, or to mimic the
bare metal behavior.
Juergen Gross (4):
paravirt: add generic _paravirt_false() function
paravirt: switch
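The "unfair test-and-set scheme" mentioned in the cover letter is the simple fallback a guest can spin on when queued spinlocks are not used. A minimal sketch of that scheme in plain C11 atomics (illustrative only; the kernel version operates on struct qspinlock and uses cpu_relax()):

#include <stdatomic.h>
#include <stdbool.h>

/* Unfair test-and-set lock: spin until the word reads 0, then try to
 * claim it.  "Unfair" because any spinner may win, regardless of how
 * long the others have already waited. */
static bool tas_spin_lock(atomic_int *lock)
{
	do {
		while (atomic_load_explicit(lock, memory_order_relaxed) != 0)
			;	/* busy-wait; the kernel would cpu_relax() here */
	} while (atomic_exchange_explicit(lock, 1, memory_order_acquire) != 0);
	return true;
}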
2015 Mar 16
0
[PATCH 9/9] qspinlock, x86, kvm: Implement KVM support for paravirt qspinlock
...;
+#if defined(CONFIG_PARAVIRT_SPINLOCKS) && defined(CONFIG_QUEUE_SPINLOCKS)
+DEF_NATIVE(pv_lock_ops, queue_spin_unlock, "movb $0, (%eax)");
+#endif
+
unsigned paravirt_patch_ident_32(void *insnbuf, unsigned len)
{
/* arg in %eax, return in %eax */
@@ -24,6 +28,8 @@ unsigned paravirt_patch_ident_64(void *i
return 0;
}
+extern bool pv_is_native_spin_unlock(void);
+
unsigned native_patch(u8 type, u16 clobbers, void *ibuf,
unsigned long addr, unsigned len)
{
@@ -47,14 +53,22 @@ unsigned native_patch(u8 type, u16 clobb
PATCH_SITE(pv_mmu_ops, write_cr3);
PATCH_SITE(pv_cpu_ops...
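The truncated PATCH_SITE dispatch above boils down to: if the spin unlock op is still the native one (pv_is_native_spin_unlock()), copy the DEF_NATIVE bytes over the call site. A hedged sketch of the byte-copy step, modeled on paravirt_patch_insns() with simplified checks:

#include <string.h>

/* Copy pre-assembled native instruction bytes (bracketed by DEF_NATIVE's
 * start/end labels) into the patch-site buffer.  If they do not fit,
 * claim the whole site so the caller leaves the original code alone. */
static unsigned patch_insns(void *insnbuf, unsigned len,
			    const char *start, const char *end)
{
	unsigned insn_len = (unsigned)(end - start);

	if (start == NULL || insn_len > len)
		return len;	/* cannot patch: keep the existing call */
	memcpy(insnbuf, start, insn_len);
	return insn_len;	/* remainder of the site is NOP-padded */
}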
2018 Aug 10
0
[PATCH 01/10] x86/paravirt: make paravirt_patch_call() and paravirt_patch_jmp() static
...ch/x86/include/asm/paravirt_types.h
index 180bc0bff0fb..036b2f88f105 100644
--- a/arch/x86/include/asm/paravirt_types.h
+++ b/arch/x86/include/asm/paravirt_types.h
@@ -370,12 +370,6 @@ extern struct pv_lock_ops pv_lock_ops;
unsigned paravirt_patch_ident_32(void *insnbuf, unsigned len);
unsigned paravirt_patch_ident_64(void *insnbuf, unsigned len);
-unsigned paravirt_patch_call(void *insnbuf,
- const void *target, u16 tgt_clobbers,
- unsigned long addr, u16 site_clobbers,
- unsigned len);
-unsigned paravirt_patch_jmp(void *insnbuf, const void *target,
- unsigned long addr, unsigned len)...
2018 Aug 13
0
[PATCH v2 01/11] x86/paravirt: make paravirt_patch_call() and paravirt_patch_jmp() static
...ch/x86/include/asm/paravirt_types.h
index 180bc0bff0fb..036b2f88f105 100644
--- a/arch/x86/include/asm/paravirt_types.h
+++ b/arch/x86/include/asm/paravirt_types.h
@@ -370,12 +370,6 @@ extern struct pv_lock_ops pv_lock_ops;
unsigned paravirt_patch_ident_32(void *insnbuf, unsigned len);
unsigned paravirt_patch_ident_64(void *insnbuf, unsigned len);
-unsigned paravirt_patch_call(void *insnbuf,
- const void *target, u16 tgt_clobbers,
- unsigned long addr, u16 site_clobbers,
- unsigned len);
-unsigned paravirt_patch_jmp(void *insnbuf, const void *target,
- unsigned long addr, unsigned len)...
2017 Oct 04
0
[PATCH 04/13] x86/paravirt: Convert DEF_NATIVE macro to GCC extended asm syntax
...\
+ asm volatile(NATIVE_LABEL("start_", ops, name) \
+ code \
+ NATIVE_LABEL("end_", ops, name) : ); \
+} \
+__visible extern const char start_##ops##_##name[], end_##ops##_##name[];
unsigned paravirt_patch_ident_32(void *insnbuf, unsigned len);
unsigned paravirt_patch_ident_64(void *insnbuf, unsigned len);
diff --git a/arch/x86/include/asm/special_insns.h b/arch/x86/include/asm/special_insns.h
index 0549c5f2c1b3..4b89668f2862 100644
--- a/arch/x86/include/asm/special_insns.h
+++ b/arch/x86/include/asm/special_insns.h
@@ -7,14 +7,14 @@
#include <asm/nops.h>
#ifd...
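Either form of DEF_NATIVE serves the same purpose: assemble the native instruction once, bracketed by start_/end_ labels, so the patcher can later copy exactly those bytes over a pvops call site. A hypothetical user-space model (x86-64; the section, label, and instruction choices here are made up for illustration):

/* The labels delimit raw instruction bytes; nothing ever jumps here. */
extern const char start_demo_unlock[], end_demo_unlock[];
asm(".pushsection .rodata\n"
    ".globl start_demo_unlock\n"
    ".globl end_demo_unlock\n"
    "start_demo_unlock:\n"
    "	movb $0, (%rdi)\n"	/* native queued_spin_unlock body */
    "end_demo_unlock:\n"
    ".popsection\n");

/* end_demo_unlock - start_demo_unlock gives the byte count to memcpy
 * into the call site, as in the patch_insns() sketch above. */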
2015 Apr 30
12
[PATCH 0/6] x86: reduce paravirtualized spinlock overhead
Paravirtualized spinlocks produce some overhead even if the kernel is
running on bare metal. The main reason is the more complex locking
and unlocking functions. Unlocking in particular is no longer just one
instruction, but so complex that it is no longer inlined.
This patch series addresses this issue by adding two more pvops
functions to reduce the size of the inlined spinlock functions. When
2015 Nov 18
0
[PATCH 3/3] x86: usergs_sysret32 pv op is no longer needed
...gs, pv_cpu_ops, swapgs);
BLANK();
diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
index c55f437..8c19b4d 100644
--- a/arch/x86/kernel/paravirt.c
+++ b/arch/x86/kernel/paravirt.c
@@ -162,7 +162,6 @@ unsigned paravirt_patch_default(u8 type, u16 clobbers, void *insnbuf,
ret = paravirt_patch_ident_64(insnbuf, len);
else if (type == PARAVIRT_PATCH(pv_cpu_ops.iret) ||
- type == PARAVIRT_PATCH(pv_cpu_ops.usergs_sysret32) ||
type == PARAVIRT_PATCH(pv_cpu_ops.usergs_sysret64))
/* If operation requires a jmp, then jmp */
ret = paravirt_patch_jmp(insnbuf, opfunc, addr, len);
@@ -217,7...
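As the hunk shows, ops such as iret and usergs_sysret64 are patched with a direct jmp to the target rather than a call. A rough user-space sketch of such a jmp patch on x86-64, assuming the conventional 5-byte "jmp rel32" encoding (simplified from what paravirt_patch_jmp() does; error handling omitted):

#include <stdint.h>
#include <string.h>

/* Overwrite the patch-site buffer with "jmp target".  The displacement
 * is relative to the instruction following the 5-byte jmp. */
static unsigned patch_jmp(void *insnbuf, const void *target,
			  unsigned long addr, unsigned len)
{
	uint8_t *b = insnbuf;
	int32_t delta = (int32_t)((unsigned long)target - (addr + 5));

	if (len < 5)
		return len;	/* site too small: leave original code */
	b[0] = 0xe9;		/* JMP rel32 opcode */
	memcpy(b + 1, &delta, sizeof(delta));
	return 5;		/* rest of the site is NOP-padded */
}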
2020 May 18
0
[PATCH 4.4 67/86] x86/paravirt: Remove the unused irq_enable_sysexit pv op
..._enable_sysexit, pv_cpu_ops, irq_enable_sysexit);
-#endif
OFFSET(PV_CPU_read_cr0, pv_cpu_ops, read_cr0);
OFFSET(PV_MMU_read_cr2, pv_mmu_ops, read_cr2);
#endif
--- a/arch/x86/kernel/paravirt.c
+++ b/arch/x86/kernel/paravirt.c
@@ -168,9 +168,6 @@ unsigned paravirt_patch_default(u8 type,
ret = paravirt_patch_ident_64(insnbuf, len);
else if (type == PARAVIRT_PATCH(pv_cpu_ops.iret) ||
-#ifdef CONFIG_X86_32
- type == PARAVIRT_PATCH(pv_cpu_ops.irq_enable_sysexit) ||
-#endif
type == PARAVIRT_PATCH(pv_cpu_ops.usergs_sysret32) ||
type == PARAVIRT_PATCH(pv_cpu_ops.usergs_sysret64))
/* If operation requ...
2015 Nov 18
0
[PATCH 2/3] x86: irq_enable_sysexit pv op is no longer needed
...cr2, pv_mmu_ops, read_cr2);
#endif
diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
index c2130ae..c55f437 100644
--- a/arch/x86/kernel/paravirt.c
+++ b/arch/x86/kernel/paravirt.c
@@ -162,9 +162,6 @@ unsigned paravirt_patch_default(u8 type, u16 clobbers, void *insnbuf,
ret = paravirt_patch_ident_64(insnbuf, len);
else if (type == PARAVIRT_PATCH(pv_cpu_ops.iret) ||
-#ifdef CONFIG_X86_32
- type == PARAVIRT_PATCH(pv_cpu_ops.irq_enable_sysexit) ||
-#endif
type == PARAVIRT_PATCH(pv_cpu_ops.usergs_sysret32) ||
type == PARAVIRT_PATCH(pv_cpu_ops.usergs_sysret64))
/* If operation requ...
2015 Nov 18
8
[PATCH 0/3] Fix and cleanup for 32-bit PV sysexit
The first patch fixes a Xen PV regression introduced by the 32-bit rewrite. Unlike
the earlier version, it uses an ALTERNATIVE instruction and avoids using the
xen_sysexit (and, in compat mode, sysret32) pv ops, as suggested by Andy. (I ended
up patching TEST with XOR to avoid extra NOPs, even though I said yesterday it
would be wrong. It's not wrong.)
As a result of this patch, irq_enable_sysexit and