search for: __this_cpu_read

Displaying 20 results from an estimated 82 matches for "__this_cpu_read".

2013 Aug 21
1
[PATCH-v3 1/4] idr: Percpu ida
...> +{ > + int tag = -ENOSPC; > + > + spin_lock(&tags->lock); Interrupts are already disabled. Drop the spinlock. > + if (tags->nr_free) > + tag = tags->freelist[--tags->nr_free]; You can keep this or avoid address calculation through segment prefixes. F.e. if (__this_cpu_read(tags->nr_free)) { int n = __this_cpu_dec_return(tags->nr_free); tag = __this_cpu_read(tags->freelist[n]); } > + spin_unlock(&tags->lock); Drop. > + * Returns a tag - an integer in the range [0..nr_tags) (passed to > + * tag_pool_init()), or otherwise -ENOSPC on allocati...
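A minimal sketch of the suggested fast path, assuming interrupts are already disabled and a DEFINE_PER_CPU freelist; the struct layout and names below are illustrative, not the actual percpu_ida definitions:

	#include <linux/percpu.h>
	#include <linux/errno.h>

	/*
	 * Hypothetical per-CPU tag pool: with interrupts off, the
	 * read/dec/read sequence below cannot be interleaved on this CPU,
	 * and the __this_cpu ops compile to segment-prefixed accesses with
	 * no per-CPU address calculation and no lock.
	 */
	struct ptags {
		unsigned int nr_free;
		int freelist[64];	/* size is arbitrary for the sketch */
	};

	static DEFINE_PER_CPU(struct ptags, ptags);

	static int tag_try_alloc_fast(void)
	{
		int tag = -ENOSPC;

		if (__this_cpu_read(ptags.nr_free)) {
			int n = __this_cpu_dec_return(ptags.nr_free);

			tag = __this_cpu_read(ptags.freelist[n]);
		}
		return tag;
	}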
2020 Jul 15
2
[PATCH v4 45/75] x86/sev-es: Adjust #VC IST Stack on entering NMI handler
...I cannot find the patch touching nmi_enter/exit()? > + */ > +void noinstr sev_es_ist_enter(struct pt_regs *regs) > +{ > + unsigned long old_ist, new_ist; > + unsigned long *p; > + > + if (!sev_es_active()) > + return; > + > + /* Read old IST entry */ > + old_ist = __this_cpu_read(cpu_tss_rw.x86_tss.ist[IST_INDEX_VC]); > + > + /* Make room on the IST stack */ > + if (on_vc_stack(regs->sp)) > + new_ist = ALIGN_DOWN(regs->sp, 8) - sizeof(old_ist); > + else > + new_ist = old_ist - sizeof(old_ist); > + > + /* Store old IST entry */ > + p...
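The excerpt cuts off at the store of the old IST entry; pieced together from the v4/v6 excerpts further down, the remainder of the enter path is roughly the following (a sketch, not necessarily the final committed form):

		/* Store old IST entry at the top of the adjusted stack area */
		p  = (unsigned long *)new_ist;
		*p = old_ist;

		/* Point the IST slot at the adjusted stack for nested #VC */
		this_cpu_write(cpu_tss_rw.x86_tss.ist[IST_INDEX_VC], new_ist);
	}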
2020 Aug 24
0
[PATCH v6 45/76] x86/sev-es: Allocate and Map IST stack for #VC handler
...TKSZ) }; /* @@ -40,6 +44,8 @@ enum exception_stack_ordering { ESTACK_NMI, ESTACK_DB, ESTACK_MCE, + ESTACK_VC, + ESTACK_VC2, N_EXCEPTION_STACKS }; @@ -139,4 +145,7 @@ static inline struct entry_stack *cpu_entry_stack(int cpu) #define __this_cpu_ist_top_va(name) \ CEA_ESTACK_TOP(__this_cpu_read(cea_exception_stacks), name) +#define __this_cpu_ist_bot_va(name) \ + CEA_ESTACK_BOT(__this_cpu_read(cea_exception_stacks), name) + #endif diff --git a/arch/x86/include/asm/page_64_types.h b/arch/x86/include/asm/page_64_types.h index 288b065955b7..d0c6c10c18a0 100644 --- a/arch/x86/include/a...
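The new __this_cpu_ist_bot_va() companion to __this_cpu_ist_top_va() gives the handler both bounds of the per-CPU VC stack; a range check along the lines of on_vc_stack() in this series would look roughly like this (sketch, exact form may differ):

	static __always_inline bool on_vc_stack(unsigned long sp)
	{
		return sp >= __this_cpu_ist_bot_va(VC) &&
		       sp <  __this_cpu_ist_top_va(VC);
	}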
2015 Apr 08
1
[Xen-devel] [PATCH v15 12/15] pvqspinlock, x86: Enable PV qspinlock for Xen
...necessary Xen specific code to allow Xen to > support the CPU halting and kicking operations needed by the queue > spinlock PV code. This basically looks the same as the version I wrote, except I think you broke it. > +static void xen_qlock_wait(u8 *byte, u8 val) > +{ > + int irq = __this_cpu_read(lock_kicker_irq); > + > + /* If kicker interrupts not initialized yet, just spin */ > + if (irq == -1) > + return; > + > + /* clear pending */ > + xen_clear_irq_pending(irq); > + > + /* > + * We check the byte value after clearing pending IRQ to make sure > + * t...
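The comment is truncated here; assembled from the v15 excerpts below, the complete wait path (clear the pending kick first, then re-check the lock byte before blocking) reads roughly:

	static void xen_qlock_wait(u8 *byte, u8 val)
	{
		int irq = __this_cpu_read(lock_kicker_irq);

		/* If kicker interrupts not initialized yet, just spin */
		if (irq == -1)
			return;

		/* Clear any kick that is already pending */
		xen_clear_irq_pending(irq);
		barrier();

		/*
		 * Re-check the byte after clearing the pending IRQ so a
		 * wakeup that raced with the clear is not lost; only then
		 * block in xen_poll_irq() until the next kick.
		 */
		if (READ_ONCE(*byte) == val)
			xen_poll_irq(irq);
	}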
2015 Mar 19
0
[Xen-devel] [PATCH 0/9] qspinlock stuff -v15
...EFINE_PER_CPU(int, lock_kicker_irq) = -1; +static DEFINE_PER_CPU(char *, irq_name); +static bool xen_pvspin = true; + +#ifdef CONFIG_QUEUE_SPINLOCK + +#include <asm/qspinlock.h> + +PV_CALLEE_SAVE_REGS_THUNK(__pv_queue_spin_unlock); + +static void xen_qlock_wait(u8 *ptr, u8 val) +{ + int irq = __this_cpu_read(lock_kicker_irq); + + xen_clear_irq_pending(irq); + + barrier(); + + if (READ_ONCE(*ptr) == val) + xen_poll_irq(irq); +} + +static void xen_qlock_kick(int cpu) +{ + xen_send_IPI_one(cpu, XEN_SPIN_UNLOCK_VECTOR); +} + +#else + struct xen_lock_waiting { struct arch_spinlock *lock; __ticket_t wa...
2015 Apr 07
0
[PATCH v15 12/15] pvqspinlock, x86: Enable PV qspinlock for Xen
...true; + +#ifdef CONFIG_QUEUE_SPINLOCK + +#include <asm/qspinlock.h> + +static void xen_qlock_kick(int cpu) +{ + xen_send_IPI_one(cpu, XEN_SPIN_UNLOCK_VECTOR); +} + +/* + * Halt the current CPU & release it back to the host + */ +static void xen_qlock_wait(u8 *byte, u8 val) +{ + int irq = __this_cpu_read(lock_kicker_irq); + + /* If kicker interrupts not initialized yet, just spin */ + if (irq == -1) + return; + + /* clear pending */ + xen_clear_irq_pending(irq); + + /* + * We check the byte value after clearing pending IRQ to make sure + * that we won't miss a wakeup event because of the cle...
2020 Jul 15
0
[PATCH v4 45/75] x86/sev-es: Adjust #VC IST Stack on entering NMI handler
...; > + */ > > +void noinstr sev_es_ist_enter(struct pt_regs *regs) > > +{ > > + unsigned long old_ist, new_ist; > > + unsigned long *p; > > + > > + if (!sev_es_active()) > > + return; > > + > > + /* Read old IST entry */ > > + old_ist = __this_cpu_read(cpu_tss_rw.x86_tss.ist[IST_INDEX_VC]); > > + > > + /* Make room on the IST stack */ > > + if (on_vc_stack(regs->sp)) > > + new_ist = ALIGN_DOWN(regs->sp, 8) - sizeof(old_ist); > > + else > > + new_ist = old_ist - sizeof(old_ist); > > + > > + /...
2014 Apr 02
0
[PATCH v8 10/10] pvqspinlock, x86: Enable qspinlock PV support for XEN
...ER_CPU(int, lock_kicker_irq) = -1; -static DEFINE_PER_CPU(char *, irq_name); static DEFINE_PER_CPU(struct xen_lock_waiting, lock_waiting); static cpumask_t waiting_cpus; -static bool xen_pvspin = true; __visible void xen_lock_spinning(struct arch_spinlock *lock, __ticket_t want) { int irq = __this_cpu_read(lock_kicker_irq); @@ -213,6 +216,94 @@ static void xen_unlock_kick(struct arch_spinlock *lock, __ticket_t next) } } +#else /* CONFIG_QUEUE_SPINLOCK */ + +#ifdef CONFIG_XEN_DEBUG_FS +static u32 kick_stats; /* CPU kick count */ +static u32 kick_nohalt_stats; /* Kick but not halt count */ +st...
2020 Aug 11
3
[PATCH] x86/paravirt: Add missing noinstr to arch_local*() helpers
On Tue, Aug 11, 2020 at 11:20:54AM +0200, peterz at infradead.org wrote: > On Tue, Aug 11, 2020 at 10:38:50AM +0200, Jürgen Groß wrote: > > In case you don't want to do it I can send the patch for the Xen > > variants. > > I might've opened a whole new can of worms here. I'm not sure we > can/want to fix the entire fallout this release :/ > > Let me
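For context: noinstr places a function in .noinstr.text and keeps tracing and compiler instrumentation out of it, which matters for code running before the kernel's entry state is established. Annotating a helper is mechanical; the body below is an illustrative stand-in, not the actual arch_local*() patch:

	/*
	 * Illustrative only: the point is the noinstr annotation, which
	 * moves the function to .noinstr.text and disables instrumentation
	 * (tracing, KASAN, kcov) for it.
	 */
	static noinstr unsigned long example_local_save_flags(void)
	{
		return native_save_fl();
	}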
2015 Jan 20
0
[PATCH v14 11/11] pvqspinlock, x86: Enable PV qspinlock for XEN
...ER_CPU(int, lock_kicker_irq) = -1; -static DEFINE_PER_CPU(char *, irq_name); static DEFINE_PER_CPU(struct xen_lock_waiting, lock_waiting); static cpumask_t waiting_cpus; -static bool xen_pvspin = true; __visible void xen_lock_spinning(struct arch_spinlock *lock, __ticket_t want) { int irq = __this_cpu_read(lock_kicker_irq); @@ -213,6 +216,118 @@ static void xen_unlock_kick(struct arch_spinlock *lock, __ticket_t next) } } +#else /* CONFIG_QUEUE_SPINLOCK */ + +#ifdef CONFIG_XEN_DEBUG_FS +static u32 kick_nohlt_stats; /* Kick but not halt count */ +static u32 halt_qhead_stats; /* Queue head haltin...
2020 Jul 14
0
[PATCH v4 45/75] x86/sev-es: Adjust #VC IST Stack on entering NMI handler
...Otherwise a + * nested nmi_exit() call (#VC->NMI->#DB) may back-adjust the IST entry + * too early. + */ +void noinstr sev_es_ist_enter(struct pt_regs *regs) +{ + unsigned long old_ist, new_ist; + unsigned long *p; + + if (!sev_es_active()) + return; + + /* Read old IST entry */ + old_ist = __this_cpu_read(cpu_tss_rw.x86_tss.ist[IST_INDEX_VC]); + + /* Make room on the IST stack */ + if (on_vc_stack(regs->sp)) + new_ist = ALIGN_DOWN(regs->sp, 8) - sizeof(old_ist); + else + new_ist = old_ist - sizeof(old_ist); + + /* Store old IST entry */ + p = (unsigned long *)new_ist; + *p = old_i...
2020 Aug 24
0
[PATCH v6 46/76] x86/sev-es: Adjust #VC IST Stack on entering NMI handler
...* unconditionally back-adjusted in sev_es_ist_exit(). Otherwise a nested + * sev_es_ist_exit() call may back-adjust the IST entry too early. + */ +void noinstr __sev_es_ist_enter(struct pt_regs *regs) +{ + unsigned long old_ist, new_ist; + unsigned long *p; + + /* Read old IST entry */ + old_ist = __this_cpu_read(cpu_tss_rw.x86_tss.ist[IST_INDEX_VC]); + + /* Make room on the IST stack */ + if (on_vc_stack(regs->sp)) + new_ist = ALIGN_DOWN(regs->sp, 8) - sizeof(old_ist); + else + new_ist = old_ist - sizeof(old_ist); + + /* Store old IST entry */ + p = (unsigned long *)new_ist; + *p = old_i...
2020 Nov 03
0
[patch V3 19/37] mm/highmem: Remove the old kmap_atomic cruft
...tic inline int kmap_atomic_idx_push(void) -{ - int idx = __this_cpu_inc_return(__kmap_atomic_idx) - 1; - -#ifdef CONFIG_DEBUG_HIGHMEM - WARN_ON_ONCE(in_irq() && !irqs_disabled()); - BUG_ON(idx >= KM_TYPE_NR); -#endif - return idx; -} - -static inline int kmap_atomic_idx(void) -{ - return __this_cpu_read(__kmap_atomic_idx) - 1; -} -static inline void kmap_atomic_idx_pop(void) -{ -#ifdef CONFIG_DEBUG_HIGHMEM - int idx = __this_cpu_dec_return(__kmap_atomic_idx); - - BUG_ON(idx < 0); -#else - __this_cpu_dec(__kmap_atomic_idx); -#endif -} -#endif -#endif +#endif /* CONFIG_HIGHMEM */ /* * Prev...
2020 Apr 28
0
[PATCH v3 45/75] x86/dumpstack/64: Handle #VC exception stacks
...), }; +static bool in_vc_exception_stack(unsigned long *stack, struct stack_info *info) +{ +#ifdef CONFIG_AMD_MEM_ENCRYPT + unsigned long begin, end, stk = (unsigned long)stack; + struct cea_vmm_exception_stacks *vc_stacks; + struct pt_regs *regs; + enum stack_type type; + int i; + + vc_stacks = __this_cpu_read(cea_vmm_exception_stacks); + + /* Already initialized? */ + if (!vc_stacks) + return false; + + for (i = 0; i < N_VC_STACKS; i++) { + type = STACK_TYPE_VC_LAST - i; + begin = (unsigned long)vc_stacks->stacks[i].stack; + end = begin + sizeof(vc_stacks->stacks[i].stack); + + if (stk...
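The loop is cut off at the range check; the likely remainder, sketched from the visible begin/end computation (the stack_info fields beyond type/begin/end, and the use of the declared regs pointer, are assumptions), would continue:

		if (stk < begin || stk >= end)
			continue;

		/* Found it: report which VC stack and its bounds */
		info->type  = type;
		info->begin = (unsigned long *)begin;
		info->end   = (unsigned long *)end;

		return true;
	}
#endif

	return false;
}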