Displaying 20 results from an estimated 33 matches for "static_branch_unlikely".
2020 Jul 06
0
[PATCH v3 2/6] powerpc/pseries: move some PAPR paravirt functions to their own file
...__KERNEL__
+
+#include <linux/jump_label.h>
+#include <asm/smp.h>
+#ifdef CONFIG_PPC64
+#include <asm/paca.h>
+#include <asm/hvcall.h>
+#endif
+
+#ifdef CONFIG_PPC_SPLPAR
+DECLARE_STATIC_KEY_FALSE(shared_processor);
+
+static inline bool is_shared_processor(void)
+{
+	return static_branch_unlikely(&shared_processor);
+}
+
+/* If bit 0 is set, the cpu has been preempted */
+static inline u32 yield_count_of(int cpu)
+{
+	__be32 yield_count = READ_ONCE(lppaca_of(cpu).yield_count);
+	return be32_to_cpu(yield_count);
+}
+
+static inline void yield_to_preempted(int cpu, u32 yield_count)
+{
+	p...
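Taken together, these helpers let lock-waiting code cheaply ask whether another vCPU has been preempted by the hypervisor. A minimal sketch of how they compose, modeled on the kernel's vcpu_is_preempted() (the body below is an illustration, not quoted from the patch):

/*
 * Sketch only: on a shared-processor (SPLPAR) guest, bit 0 of the
 * lppaca yield count is set while the vCPU is preempted, so a lock
 * waiter can yield to the holder instead of spinning uselessly.
 */
static inline bool vcpu_is_preempted(int cpu)
{
	if (!is_shared_processor())
		return false;
	return !!(yield_count_of(cpu) & 1);
}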
2020 Aug 24
0
[PATCH v6 46/76] x86/sev-es: Adjust #VC IST Stack on entering NMI handler
...ol handle_vc_boot_ghcb(struct pt_regs *regs);
 
+#ifdef CONFIG_AMD_MEM_ENCRYPT
+extern struct static_key_false sev_es_enable_key;
+extern void __sev_es_ist_enter(struct pt_regs *regs);
+extern void __sev_es_ist_exit(void);
+static __always_inline void sev_es_ist_enter(struct pt_regs *regs)
+{
+	if (static_branch_unlikely(&sev_es_enable_key))
+		__sev_es_ist_enter(regs);
+}
+static __always_inline void sev_es_ist_exit(void)
+{
+	if (static_branch_unlikely(&sev_es_enable_key))
+		__sev_es_ist_exit();
+}
+#else
+static inline void sev_es_ist_enter(struct pt_regs *regs) { }
+static inline void sev_es_ist_exit(v...
2020 Sep 16
0
[PATCH] mm: remove extra ZONE_DEVICE struct page refcount
...hecked in
the actual page free path instead of for each refcount manipulation,
so a good old unlikely is probably enough.
Also free_devmap_managed_page can move to mm/internal.h.
> +#ifdef CONFIG_DEV_PAGEMAP_OPS
> +static void __put_devmap_managed_page(struct page *page)
> +{
> +	if (!static_branch_unlikely(&devmap_managed_key))
> +		return;
> +
> +	switch (page->pgmap->type) {
> +	case MEMORY_DEVICE_PRIVATE:
> +	case MEMORY_DEVICE_FS_DAX:
> +		free_devmap_managed_page(page);
> +		break;
> +	default:
> +		break;
> +	}
> +}
> +#else
> +static inline void...
2020 Sep 14
5
[PATCH] mm: remove extra ZONE_DEVICE struct page refcount
...inux/mm.h
@@ -1093,34 +1093,6 @@ static inline bool is_zone_device_page(const struct page *page)
 #ifdef CONFIG_DEV_PAGEMAP_OPS
 void free_devmap_managed_page(struct page *page);
 DECLARE_STATIC_KEY_FALSE(devmap_managed_key);
-
-static inline bool page_is_devmap_managed(struct page *page)
-{
-	if (!static_branch_unlikely(&devmap_managed_key))
-		return false;
-	if (!is_zone_device_page(page))
-		return false;
-	switch (page->pgmap->type) {
-	case MEMORY_DEVICE_PRIVATE:
-	case MEMORY_DEVICE_FS_DAX:
-		return true;
-	default:
-		break;
-	}
-	return false;
-}
-
-void put_devmap_managed_page(struct page *page...
2020 Sep 17
0
[PATCH] mm: remove extra ZONE_DEVICE struct page refcount
...r each refcount manipulation,
> so a good old unlikely is probably enough.
> 
> Also free_devmap_managed_page can move to mm/internal.h.
Good suggestion.
>> +#ifdef CONFIG_DEV_PAGEMAP_OPS
>> +static void __put_devmap_managed_page(struct page *page)
>> +{
>> +	if (!static_branch_unlikely(&devmap_managed_key))
>> +		return;
>> +
>> +	switch (page->pgmap->type) {
>> +	case MEMORY_DEVICE_PRIVATE:
>> +	case MEMORY_DEVICE_FS_DAX:
>> +		free_devmap_managed_page(page);
>> +		break;
>> +	default:
>> +		break;
>> +	}
>...
2020 Jul 15
2
[PATCH v4 45/75] x86/sev-es: Adjust #VC IST Stack on entering NMI handler
...4 unconditional extra CALL+RET, a LOAD
(which will potentially miss) and a compare and branch.
How's that a win for normal people? Can we please turn all these
sev_es_*() hooks into something like:
DECLARE_STATIC_KEY_FALSE(sev_es_enabled_key);
static __always_inline void sev_es_foo()
{
	if (static_branch_unlikely(&sev_es_enabled_key))
		__sev_es_foo();
}
So that normal people will only see an extra NOP?
> diff --git a/arch/x86/kernel/sev-es.c b/arch/x86/kernel/sev-es.c
> index d415368f16ec..2a7cc72db1d5 100644
> --- a/arch/x86/kernel/sev-es.c
> +++ b/arch/x86/kernel/sev-es.c
> @@ -78,6...
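The requested idiom is the standard jump-label pattern: the inline wrapper compiles to a single NOP, patched into a jump only if the key is enabled at boot. A self-contained sketch (the out-of-line body and the enable site are illustrative assumptions):

#include <linux/jump_label.h>

DEFINE_STATIC_KEY_FALSE(sev_es_enabled_key);

void __sev_es_foo(void)
{
	/* slow path, reached only on SEV-ES guests */
}

static __always_inline void sev_es_foo(void)
{
	/* a NOP for everyone until the key is flipped */
	if (static_branch_unlikely(&sev_es_enabled_key))
		__sev_es_foo();
}

/* boot-time enable site (hypothetical placement): */
static void __init sev_es_enable_hooks(void)
{
	static_branch_enable(&sev_es_enabled_key);
}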
2023 Aug 16
1
[PATCH vhost v13 05/12] virtio_ring: introduce virtqueue_dma_dev()
...y page, just the first page of the COMP pages.
> > > > >
> > > > > So I think there is no impact.
> > > >
> > > > Nope, see this:
> > > >
> > > >         if (SKB_FRAG_PAGE_ORDER &&
> > > >             !static_branch_unlikely(&net_high_order_alloc_disable_key)) {
> > > >                 /* Avoid direct reclaim but allow kswapd to wake */
> > > >                 pfrag->page = alloc_pages((gfp & ~__GFP_DIRECT_RECLAIM) |
> > > >                                           __GFP_...
2020 Feb 07
0
[RFC PATCH v7 70/78] KVM: introspection: restore the state of MSR interception on unhook
...mx_disable_intercept_for_msr(struct kvm_vcpu *vcpu,
 	if (!cpu_has_vmx_msr_bitmap())
 		return;
 
+#ifdef CONFIG_KVM_INTROSPECTION
+	if ((type & MSR_TYPE_W) &&
+	    kvmi_monitor_msrw_intercept(vcpu, msr, false))
+		type &= ~MSR_TYPE_W;
+#endif /* CONFIG_KVM_INTROSPECTION */
+
 	if (static_branch_unlikely(&enable_evmcs))
 		evmcs_touch_msr_bitmap();
 
@@ -3669,6 +3675,11 @@ static __always_inline void vmx_enable_intercept_for_msr(struct kvm_vcpu *vcpu,
 	if (!cpu_has_vmx_msr_bitmap())
 		return;
 
+#ifdef CONFIG_KVM_INTROSPECTION
+	if (type & MSR_TYPE_W)
+		kvmi_monitor_msrw_intercept(vcpu,...
2020 Sep 26
1
[PATCH 2/2] mm: remove extra ZONE_DEVICE struct page refcount
...ice_page(const struct page *page)
>  }
>  #endif
>  
> -#ifdef CONFIG_DEV_PAGEMAP_OPS
> -void free_devmap_managed_page(struct page *page);
> -DECLARE_STATIC_KEY_FALSE(devmap_managed_key);
> -
> -static inline bool page_is_devmap_managed(struct page *page)
> -{
> -	if (!static_branch_unlikely(&devmap_managed_key))
> -		return false;
> -	if (!is_zone_device_page(page))
> -		return false;
> -	switch (page->pgmap->type) {
> -	case MEMORY_DEVICE_PRIVATE:
> -	case MEMORY_DEVICE_FS_DAX:
> -		return true;
> -	default:
> -		break;
> -	}
> -	return fals...
2020 Jul 15
0
[PATCH v4 45/75] x86/sev-es: Adjust #VC IST Stack on entering NMI handler
On Wed, Jul 15, 2020 at 11:47:02AM +0200, Peter Zijlstra wrote:
> On Tue, Jul 14, 2020 at 02:08:47PM +0200, Joerg Roedel wrote:
> DECLARE_STATIC_KEY_FALSE(sev_es_enabled_key);
> 
> static __always_inline void sev_es_foo()
> {
> 	if (static_branch_unlikely(&sev_es_enabled_key))
> 		__sev_es_foo();
> }
> 
> So that normal people will only see an extra NOP?
Yes, that is a good idea, I will use a static key for these cases.
> > +static bool on_vc_stack(unsigned long sp)
> 
> noinstr or __always_inline
Will add __always_inl...
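An __always_inline bounds check of the kind being requested might look like the sketch below; the IST top/bottom helpers are assumptions about the cpu_entry_area layout, not code from the series:

/*
 * Sketch only: an always-inlined check is safe from the NMI entry
 * path, where an out-of-line call would not be safe to instrument.
 */
static __always_inline bool on_vc_stack(unsigned long sp)
{
	unsigned long top    = __this_cpu_ist_top_va(VC);	/* assumed helper */
	unsigned long bottom = top - EXCEPTION_STKSZ;		/* assumed size */

	return sp >= bottom && sp < top;
}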
2020 Sep 07
0
[PATCH v7 71/72] x86/efi: Add GHCB mappings when SEV-ES is active
...#include <asm/trap_pf.h>
diff --git a/arch/x86/include/asm/sev-es.h b/arch/x86/include/asm/sev-es.h
index e919f09ae33c..cf1d957c7091 100644
--- a/arch/x86/include/asm/sev-es.h
+++ b/arch/x86/include/asm/sev-es.h
@@ -102,11 +102,13 @@ static __always_inline void sev_es_nmi_complete(void)
 	if (static_branch_unlikely(&sev_es_enable_key))
 		__sev_es_nmi_complete();
 }
+extern int __init sev_es_efi_map_ghcbs(pgd_t *pgd);
 #else
 static inline void sev_es_ist_enter(struct pt_regs *regs) { }
 static inline void sev_es_ist_exit(void) { }
 static inline int sev_es_setup_ap_jump_table(struct real_mode_header *rmh...
2020 Aug 24
0
[PATCH v6 69/76] x86/realmode: Setup AP jump table
...ned int bits)
 	return (val & mask);
 }
 
+struct real_mode_header;
+enum stack_type;
+
 /* Early IDT entry points for #VC handler */
 extern void vc_no_ghcb(void);
 extern bool handle_vc_boot_ghcb(struct pt_regs *regs);
@@ -91,9 +94,11 @@ static __always_inline void sev_es_ist_exit(void)
 	if (static_branch_unlikely(&sev_es_enable_key))
 		__sev_es_ist_exit();
 }
+extern int sev_es_setup_ap_jump_table(struct real_mode_header *rmh);
 #else
 static inline void sev_es_ist_enter(struct pt_regs *regs) { }
 static inline void sev_es_ist_exit(void) { }
+static inline int sev_es_setup_ap_jump_table(struct real_mod...
2020 Jul 03
7
[PATCH v2 0/6] powerpc: queued spinlocks and rwlocks
v2 is updated to account for feedback from Will, Peter, and
Waiman (thank you), and trims off a couple of RFC and unrelated
patches.
Thanks,
Nick
Nicholas Piggin (6):
  powerpc/powernv: must include hvcall.h to get PAPR defines
  powerpc/pseries: move some PAPR paravirt functions to their own file
  powerpc: move spinlock implementation to simple_spinlock
  powerpc/64s: implement queued
2020 Jul 24
8
[PATCH v4 0/6] powerpc: queued spinlocks and rwlocks
Updated with everybody's feedback (thanks all), and more performance
results.
What I've found is that I might have been measuring the worst load point for
the paravirt case; looking at a range of loads, it's clear that
queued spinlocks are overall better even on PV, doubly so when you look
at the generally much improved worst-case latencies.
I have defaulted it to N even though
2020 Sep 25
6
[RFC PATCH v2 0/2] mm: remove extra ZONE_DEVICE struct page refcount
Matthew Wilcox, Ira Weiny, and others have complained that ZONE_DEVICE
struct page reference counting is ugly because such pages are "free" when the
reference count is one instead of zero. This leads to explicit checks
for ZONE_DEVICE pages in places like put_page(), GUP, THP splitting, and
page migration, which have to adjust the expected reference count when
determining if the page is
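The "explicit checks" being complained about have roughly this shape in the put_page() of that era (a sketch from memory, not text from the patch):

/*
 * Sketch: put_page() must special-case ZONE_DEVICE pages, which are
 * "free" at refcount 1, before the normal drop-to-zero path.
 */
static inline void put_page(struct page *page)
{
	page = compound_head(page);

	if (page_is_devmap_managed(page)) {
		put_devmap_managed_page(page);	/* released at refcount 1 */
		return;
	}

	if (put_page_testzero(page))		/* everyone else: at 0 */
		__put_page(page);
}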
2020 Sep 25
0
[PATCH 2/2] mm: remove extra ZONE_DEVICE struct page refcount
...-1092,39 +1092,6 @@ static inline bool is_zone_device_page(const struct page *page)
 }
 #endif
 
-#ifdef CONFIG_DEV_PAGEMAP_OPS
-void free_devmap_managed_page(struct page *page);
-DECLARE_STATIC_KEY_FALSE(devmap_managed_key);
-
-static inline bool page_is_devmap_managed(struct page *page)
-{
-	if (!static_branch_unlikely(&devmap_managed_key))
-		return false;
-	if (!is_zone_device_page(page))
-		return false;
-	switch (page->pgmap->type) {
-	case MEMORY_DEVICE_PRIVATE:
-	case MEMORY_DEVICE_FS_DAX:
-		return true;
-	default:
-		break;
-	}
-	return false;
-}
-
-void put_devmap_managed_page(struct page *page...
2020 Jul 02
12
[PATCH 0/8] powerpc: queued spinlocks and rwlocks
This series adds an option to use queued spinlocks for powerpc, and
makes it the default for the Book3S-64 subarch.
This effort starts with the generic code so it's very simple but
still very performant. There are optimisations that can be made to
slowpaths, but I think it's better to attack those incrementally
if/when we find things, and try to add the improvements to generic
code as
2020 Jul 06
13
[PATCH v3 0/6] powerpc: queued spinlocks and rwlocks
v3 is updated to use __pv_queued_spin_unlock, noticed by Waiman (thank you).
Thanks,
Nick
Nicholas Piggin (6):
  powerpc/powernv: must include hvcall.h to get PAPR defines
  powerpc/pseries: move some PAPR paravirt functions to their own file
  powerpc: move spinlock implementation to simple_spinlock
  powerpc/64s: implement queued spinlocks and rwlocks
  powerpc/pseries: implement paravirt