search for: __visible

Displaying 12 results from an estimated 222 matches for "__visible".
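For context on the search term: in the kernel, __visible wraps gcc's externally_visible attribute. It marks a symbol as referenced from outside what the compiler can see (typically from assembly entry stubs), so whole-program optimization and LTO must not discard, localize, or clone it. A minimal sketch of the definition, assuming the usual home in include/linux/compiler_attributes.h (older trees keep it in compiler-gcc.h):

/* Keep the symbol visible to code the compiler cannot see, e.g.
 * assembly stubs; otherwise -fwhole-program/LTO may drop or rename it. */
#define __visible __attribute__((__externally_visible__))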

2013 Dec 15
0
[PATCH v3 [resend] 15/18] smp, x86: kill SMP single function call interrupt
...interrupt(void); #define trace_irq_move_cleanup_interrupt irq_move_cleanup_interrupt #define trace_reboot_interrupt reboot_interrupt #define trace_kvm_posted_intr_ipi kvm_posted_intr_ipi @@ -182,7 +180,6 @@ extern asmlinkage void smp_irq_move_cleanup_interrupt(void); #ifdef CONFIG_SMP extern __visible void smp_reschedule_interrupt(struct pt_regs *); extern __visible void smp_call_function_interrupt(struct pt_regs *); -extern __visible void smp_call_function_single_interrupt(struct pt_regs *); extern __visible void smp_invalidate_interrupt(struct pt_regs *); #endif diff --git a/arch/x86/incl...
2013 Dec 04
1
[RFC PATCH v3 19/19] smp, x86: kill SMP single function call interrupt
...interrupt(void); #define trace_irq_move_cleanup_interrupt irq_move_cleanup_interrupt #define trace_reboot_interrupt reboot_interrupt #define trace_kvm_posted_intr_ipi kvm_posted_intr_ipi @@ -182,7 +180,6 @@ extern asmlinkage void smp_irq_move_cleanup_interrupt(void); #ifdef CONFIG_SMP extern __visible void smp_reschedule_interrupt(struct pt_regs *); extern __visible void smp_call_function_interrupt(struct pt_regs *); -extern __visible void smp_call_function_single_interrupt(struct pt_regs *); extern __visible void smp_invalidate_interrupt(struct pt_regs *); #endif diff --git a/arch/x86/incl...
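Both postings above delete the same declaration. The surviving lines show the standard shape of these handlers: an assembly stub acks the IPI and transfers to a C body, which must be __visible because only assembly references it. A hedged sketch of a handler of that era (helper names such as smp_entering_irq()/exiting_irq() are illustrative; the exact helpers varied across versions):

/* arch/x86/kernel/smp.c, roughly: entered from the asm IPI stub. */
__visible void smp_call_function_interrupt(struct pt_regs *regs)
{
	smp_entering_irq();	/* ack the APIC, irq_enter() */
	generic_smp_call_function_interrupt();
	exiting_irq();		/* irq_exit() */
}

Killing the single-function-call vector works because the series folds the single-call path into the generic call-function queue, so one vector and one handler suffice.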
2020 Aug 24
0
[PATCH v6 48/76] x86/entry/64: Add entry code for #VC handler
...6/include/asm/idtentry.h b/arch/x86/include/asm/idtentry.h index 337dcfd45472..5ce67113a761 100644 --- a/arch/x86/include/asm/idtentry.h +++ b/arch/x86/include/asm/idtentry.h @@ -308,6 +308,18 @@ static __always_inline void __##func(struct pt_regs *regs) DECLARE_IDTENTRY_RAW(vector, func); \ __visible void noist_##func(struct pt_regs *regs) +/** + * DECLARE_IDTENTRY_VC - Declare functions for the VC entry point + * @vector: Vector number (ignored for C) + * @func: Function name of the entry point + * + * Maps to DECLARE_IDTENTRY_RAW_ERRORCODE, but declares also the + * safe_stack C handler. +...
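The hunk above extends idtentry.h, where every declaration macro pairs an assembly stub with its __visible C entry point; the new DECLARE_IDTENTRY_VC additionally declares the safe_stack handler mentioned in the comment block. A hedged sketch of the general shape only (heavily simplified; the real macros also emit Xen variants, noinstr annotations, and error-code plumbing):

/* Each IDTENTRY macro declares the asm stub plus the C handler the
 * stub jumps to; the handler is __visible since only asm calls it. */
#define DECLARE_IDTENTRY_RAW(vector, func)		\
	asmlinkage void asm_##func(void);		\
	__visible void func(struct pt_regs *regs)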
2020 May 18
0
[PATCH 4.4 67/86] x86/paravirt: Remove the unused irq_enable_sysexit pv op
...uires a jmp, then jmp */ @@ -226,7 +223,6 @@ static u64 native_steal_clock(int cpu) /* These are in entry.S */ extern void native_iret(void); -extern void native_irq_enable_sysexit(void); extern void native_usergs_sysret32(void); extern void native_usergs_sysret64(void); @@ -385,9 +381,6 @@ __visible struct pv_cpu_ops pv_cpu_ops = .load_sp0 = native_load_sp0, -#if defined(CONFIG_X86_32) - .irq_enable_sysexit = native_irq_enable_sysexit, -#endif #ifdef CONFIG_X86_64 #ifdef CONFIG_IA32_EMULATION .usergs_sysret32 = native_usergs_sysret32, --- a/arch/x86/kernel/paravirt_patch_32.c +++ b/a...
2015 Nov 18
0
[PATCH 2/3] x86: irq_enable_sysexit pv op is no longer needed
...uires a jmp, then jmp */ @@ -220,7 +217,6 @@ static u64 native_steal_clock(int cpu) /* These are in entry.S */ extern void native_iret(void); -extern void native_irq_enable_sysexit(void); extern void native_usergs_sysret32(void); extern void native_usergs_sysret64(void); @@ -379,9 +375,6 @@ __visible struct pv_cpu_ops pv_cpu_ops = { .load_sp0 = native_load_sp0, -#if defined(CONFIG_X86_32) - .irq_enable_sysexit = native_irq_enable_sysexit, -#endif #ifdef CONFIG_X86_64 #ifdef CONFIG_IA32_EMULATION .usergs_sysret32 = native_usergs_sysret32, diff --git a/arch/x86/kernel/paravirt_patch_32....
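Note that here __visible sits on a data object rather than a function: the native pv_cpu_ops table is read and rewritten by the paravirt patching machinery outside the compiler's view, so it needs the same protection from whole-program optimization. A hedged sketch of the shape (the field set changes constantly between kernel versions):

/* arch/x86/kernel/paravirt.c, roughly: the native op table that
 * paravirt patching consults at boot. */
__visible struct pv_cpu_ops pv_cpu_ops = {
	.cpuid		= native_cpuid,
	.load_sp0	= native_load_sp0,
	.iret		= native_iret,
	/* this patch deletes the .irq_enable_sysexit slot entirely */
};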
2017 Feb 15
0
[PATCH v4 1/2] x86/paravirt: Change vcp_is_preempted() arg type to long
..._preempted(long cpu) { return pv_vcpu_is_preempted(cpu); } diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c index 099fcba..85ed343 100644 --- a/arch/x86/kernel/kvm.c +++ b/arch/x86/kernel/kvm.c @@ -589,7 +589,7 @@ static void kvm_wait(u8 *ptr, u8 val) local_irq_restore(flags); } -__visible bool __kvm_vcpu_is_preempted(int cpu) +__visible bool __kvm_vcpu_is_preempted(long cpu) { struct kvm_steal_time *src = &per_cpu(steal_time, cpu); diff --git a/arch/x86/kernel/paravirt-spinlocks.c b/arch/x86/kernel/paravirt-spinlocks.c index 6259327..8f2d1c9 100644 --- a/arch/x86/kernel/par...
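The int-to-long widening shown above exists to make the assembly side easy: on x86-64 a long argument fills %rdi, so the hand-written thunk added later in the series can index the per-cpu offset table without first extending a 32-bit cpu number. Per the diff context, the C body is otherwise unchanged:

/* arch/x86/kernel/kvm.c after this patch: */
__visible bool __kvm_vcpu_is_preempted(long cpu)
{
	struct kvm_steal_time *src = &per_cpu(steal_time, cpu);

	return !!src->preempted;
}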
2020 May 18
0
Patch "x86/paravirt: Remove the unused irq_enable_sysexit pv op" has been added to the 4.4-stable tree
...uires a jmp, then jmp */ @@ -226,7 +223,6 @@ static u64 native_steal_clock(int cpu) /* These are in entry.S */ extern void native_iret(void); -extern void native_irq_enable_sysexit(void); extern void native_usergs_sysret32(void); extern void native_usergs_sysret64(void); @@ -385,9 +381,6 @@ __visible struct pv_cpu_ops pv_cpu_ops = .load_sp0 = native_load_sp0, -#if defined(CONFIG_X86_32) - .irq_enable_sysexit = native_irq_enable_sysexit, -#endif #ifdef CONFIG_X86_64 #ifdef CONFIG_IA32_EMULATION .usergs_sysret32 = native_usergs_sysret32, --- a/arch/x86/kernel/paravirt_patch_32.c +++ b/a...
2015 Feb 16
1
[Xen-devel] [PATCH V5] x86 spinlock: Fix memory corruption on completing completions
...static inline void check_zero(void) > { > u8 ret; > - u8 old = ACCESS_ONCE(zero_stats); > + u8 old = READ_ONCE(zero_stats); > if (unlikely(old)) { > ret = cmpxchg(&zero_stats, old, 0); > /* This ensures only one fellow resets the stat */ > @@ -112,6 +112,7 @@ __visible void xen_lock_spinning(struct arch_spinlock *lock, __ticket_t want) > struct xen_lock_waiting *w = this_cpu_ptr(&lock_waiting); > int cpu = smp_processor_id(); > u64 start; > + __ticket_t head; > unsigned long flags; > > /* If kicker interrupts not initialized y...
2017 Feb 10
3
[PATCH v2] x86/paravirt: Don't make vcpu_is_preempted() a callee-save function
...save vcpu_is_preempted; + bool (*vcpu_is_preempted)(int cpu); }; /* This contains all the paravirt structures: we get a convenient diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c index 099fcba..eb3753d 100644 --- a/arch/x86/kernel/kvm.c +++ b/arch/x86/kernel/kvm.c @@ -595,7 +595,6 @@ __visible bool __kvm_vcpu_is_preempted(int cpu) return !!src->preempted; } -PV_CALLEE_SAVE_REGS_THUNK(__kvm_vcpu_is_preempted); /* * Setup pv_lock_ops to exploit KVM_FEATURE_PV_UNHALT if present. @@ -614,10 +613,8 @@ void __init kvm_spinlock_init(void) pv_lock_ops.wait = kvm_wait; pv_lock_op...
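The removal above trades the register-preserving thunk for a plain C function pointer. The difference, sketched (PV_CALLEE_SAVE_REGS_THUNK is the real macro; the surrounding declarations are condensed):

/* Before: call sites go through a generated thunk that saves and
 * restores all caller-clobbered registers around the C call. */
PV_CALLEE_SAVE_REGS_THUNK(__kvm_vcpu_is_preempted);
struct paravirt_callee_save vcpu_is_preempted;

/* After (this v2): an ordinary pv op using the normal C ABI, callable
 * like any function -- at the cost of the register pressure the
 * thunk used to hide at every call site. */
bool (*vcpu_is_preempted)(int cpu);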
2017 Feb 15
4
[PATCH v4 0/2] x86/kvm: Reduce vcpu_is_preempted() overhead
v3->v4: - Fix x86-32 build error. v2->v3: - Provide an optimized __raw_callee_save___kvm_vcpu_is_preempted() in assembly as suggested by PeterZ. - Add a new patch to change vcpu_is_preempted() argument type to long to ease the writing of the assembly code. v1->v2: - Rerun the fio test on a different system on both bare-metal and a KVM guest. Both sockets were
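The "optimized __raw_callee_save___kvm_vcpu_is_preempted() in assembly" from the changelog above can be tiny, because the C version only tests one byte. A hedged reconstruction of the idea, not the verbatim patch (in the real patch the preempted-field offset comes from asm-offsets rather than being spelled out):

asm(
".pushsection .text;"
".global __raw_callee_save___kvm_vcpu_is_preempted;"
"__raw_callee_save___kvm_vcpu_is_preempted:"
"movq	__per_cpu_offset(,%rdi,8), %rax;"		/* per-cpu base for cpu in %rdi */
"cmpb	$0, steal_time+KVM_STEAL_TIME_preempted(%rax);"	/* steal_time.preempted != 0? */
"setne	%al;"						/* boolean return in %al */
"ret;"
".popsection");

Since only %rax is written, nothing needs saving, which is what makes the callee-save convention nearly free here.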
2017 Feb 15
3
[PATCH v3 0/2] x86/kvm: Reduce vcpu_is_preempted() overhead
v2->v3: - Provide an optimized __raw_callee_save___kvm_vcpu_is_preempted() in assembly as suggested by PeterZ. - Add a new patch to change vcpu_is_preempted() argument type to long to ease the writing of the assembly code. v1->v2: - Rerun the fio test on a different system on both bare-metal and a KVM guest. Both sockets were utilized in this test. - The commit log was
2015 Feb 13
3
[PATCH V4] x86 spinlock: Fix memory corruption on completing completions
.../x86/kernel/kvm.c @@ -609,7 +609,7 @@ static inline void check_zero(void) u8 ret; u8 old; - old = ACCESS_ONCE(zero_stats); + old = READ_ONCE(zero_stats); if (unlikely(old)) { ret = cmpxchg(&zero_stats, old, 0); /* This ensures only one fellow resets the stat */ @@ -727,6 +727,7 @@ __visible void kvm_lock_spinning(struct arch_spinlock *lock, __ticket_t want) int cpu; u64 start; unsigned long flags; + __ticket_t head; if (in_nmi()) return; @@ -772,7 +773,8 @@ __visible void kvm_lock_spinning(struct arch_spinlock *lock, __ticket_t want) * check again make sure it didn'...
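The `__ticket_t head;` additions visible in both the Xen and KVM hunks above are the heart of the fix: the slowpath takes one snapshot of the lock's head ticket while the lock is known to be alive, and every later "did I already get it?" check compares against that snapshot instead of re-reading memory that may have been released, and even freed, by the previous holder. (The same patch converts the stats path from ACCESS_ONCE() to READ_ONCE().) A hedged sketch of the pattern, using helper names as they appear on the KVM side:

/* Snapshot once, early, while the lock still certainly exists: */
head = READ_ONCE(lock->tickets.head);

/* ...and test the snapshot later, never fresh lock memory: */
if (__tickets_equal(head, want))
	add_stats(TAKEN_SLOW_PICKUP, 1);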