Displaying 20 results from an estimated 21 matches for "__no_randomize_layout".
2017 Sep 01
0
[RFC PATCH v2 1/7] x86/paravirt: Add pv_idle_ops to paravirt ops
...86/include/asm/paravirt_types.h
>> index 9ffc36b..cf45726 100644
>> --- a/arch/x86/include/asm/paravirt_types.h
>> +++ b/arch/x86/include/asm/paravirt_types.h
>> @@ -324,6 +324,10 @@ struct pv_lock_ops {
>> struct paravirt_callee_save vcpu_is_preempted;
>> } __no_randomize_layout;
>>
>> +struct pv_idle_ops {
>> + void (*poll)(void);
>> +} __no_randomize_layout;
>> +
>> /* This contains all the paravirt structures: we get a convenient
>> * number for each function using the offset which we use to indicate
>> * what t...
2017 Nov 13
2
[PATCH RFC v3 1/6] x86/paravirt: Add pv_idle_ops to paravirt ops
...f --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h
index 10cc3b9..95c0e3e 100644
--- a/arch/x86/include/asm/paravirt_types.h
+++ b/arch/x86/include/asm/paravirt_types.h
@@ -313,6 +313,10 @@ struct pv_lock_ops {
struct paravirt_callee_save vcpu_is_preempted;
} __no_randomize_layout;
+struct pv_idle_ops {
+ void (*poll)(void);
+} __no_randomize_layout;
+
/* This contains all the paravirt structures: we get a convenient
* number for each function using the offset which we use to indicate
* what to patch. */
@@ -323,6 +327,7 @@ struct paravirt_patch_template {
struct pv...
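The hunks above only show the new struct being added. A minimal standalone model of the mechanism it enables — a table of function pointers with an empty native default that guest code can replace, consulted from the idle path — looks roughly like this (only pv_idle_ops and poll come from the patch; every other name is illustrative, and the real kernel routes the call through its pvops patching machinery):

/* Simplified model of pv_idle_ops: a poll hook with an empty native default
 * that guest code can override.  Names other than pv_idle_ops/poll are
 * illustrative; the real kernel dispatches through its pvops machinery. */
#include <stdio.h>

struct pv_idle_ops {
    void (*poll)(void);
};

static void native_poll(void)
{
    /* bare metal: nothing to poll for, fall straight through to halt */
}

static void guest_poll(void)
{
    /* a guest could spin here briefly, hoping to avoid an expensive VM exit */
    printf("guest: polling before halt\n");
}

static struct pv_idle_ops pv_idle_ops = { .poll = native_poll };

static void arch_cpu_idle(void)
{
    pv_idle_ops.poll();    /* the new hook: poll before entering idle */
    /* ... halt / mwait, as before ... */
}

int main(void)
{
    arch_cpu_idle();                   /* native default: silent */
    pv_idle_ops.poll = guest_poll;     /* guest init would install its hook */
    arch_cpu_idle();                   /* now polls first */
    return 0;
}

The point of routing this through paravirt ops is that on bare metal the extra call can later be patched away, so the native idle path pays essentially nothing for the hook.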
2017 Sep 05
3
[PATCH 3/4] paravirt: add virt_spin_lock pvops function
...>>> +++ b/arch/x86/include/asm/paravirt_types.h
>>> @@ -319,6 +319,7 @@ struct pv_lock_ops {
>>> void (*kick)(int cpu);
>>>
>>> struct paravirt_callee_save vcpu_is_preempted;
>>> + struct paravirt_callee_save virt_spin_lock;
>>> } __no_randomize_layout;
>>>
>>> /* This contains all the paravirt structures: we get a convenient
>>> diff --git a/arch/x86/include/asm/qspinlock.h b/arch/x86/include/asm/qspinlock.h
>>> index 48a706f641f2..fbd98896385c 100644
>>> --- a/arch/x86/include/asm/qspinlock.h
>...
2017 Sep 05
2
[PATCH 3/4] paravirt: add virt_spin_lock pvops function
...00644
> --- a/arch/x86/include/asm/paravirt_types.h
> +++ b/arch/x86/include/asm/paravirt_types.h
> @@ -319,6 +319,7 @@ struct pv_lock_ops {
> void (*kick)(int cpu);
>
> struct paravirt_callee_save vcpu_is_preempted;
> + struct paravirt_callee_save virt_spin_lock;
> } __no_randomize_layout;
>
> /* This contains all the paravirt structures: we get a convenient
> diff --git a/arch/x86/include/asm/qspinlock.h b/arch/x86/include/asm/qspinlock.h
> index 48a706f641f2..fbd98896385c 100644
> --- a/arch/x86/include/asm/qspinlock.h
> +++ b/arch/x86/include/asm/qspinlock.h...
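The hunk adds a virt_spin_lock entry to pv_lock_ops next to vcpu_is_preempted. A simplified standalone model of what such a hook looks like to its caller is sketched below (only the member name virt_spin_lock is from the patch; the stub and the wrapper are illustrative, and the real code goes through paravirt_callee_save / PVOP call macros rather than a plain function pointer):

/* Simplified model of the new pv_lock_ops.virt_spin_lock hook.  Only the
 * member name comes from the patch; the stub and the wrapper are illustrative
 * and skip the kernel's paravirt_callee_save / PVOP call machinery. */
#include <stdbool.h>

struct qspinlock;    /* opaque here; the real type is in qspinlock_types.h */

struct pv_lock_ops {
    void (*kick)(int cpu);
    bool (*vcpu_is_preempted)(long cpu);
    bool (*virt_spin_lock)(struct qspinlock *lock);    /* new member */
};

/* Bare metal: decline, so the regular queued-spinlock slowpath runs.
 * With pvops patching the call site can later be nop'ed out entirely. */
static bool native_virt_spin_lock(struct qspinlock *lock)
{
    (void)lock;
    return false;
}

static struct pv_lock_ops pv_lock_ops = {
    .virt_spin_lock = native_virt_spin_lock,
};

/* Roughly where the qspinlock code would consult the hook. */
static bool virt_spin_lock(struct qspinlock *lock)
{
    return pv_lock_ops.virt_spin_lock(lock);
}

Returning false means "not handled", so bare metal falls through to normal queued spinning; a guest implementation can take the lock by other means (for example a test-and-set fallback) and return true to short-circuit the slowpath.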
2017 Nov 13
7
[PATCH RFC v3 0/6] x86/idle: add halt poll support
From: Yang Zhang <yang.zhang.wz at gmail.com>
Some latency-intensive workloads have seen an obvious performance drop when
running inside a VM. The main reason is that the overhead is amplified when
running inside a VM; the biggest cost I have seen is in the idle path.
This patch introduces a new mechanism to poll for a while before
entering the idle state. If a reschedule is needed during the poll, then we
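In other words: before executing the halt instruction in the guest's idle path (which forces a costly VM exit), spin for a bounded time checking whether work has become runnable. A rough standalone sketch of that idea, with stand-ins modeled on the kernel's primitives (need_resched, do_halt, and the time source below are illustrative, not the patch's API):

/* Rough model of "poll before halt": spin for up to poll_ns checking for
 * runnable work; only halt (and take the VM exit) if nothing shows up.
 * need_resched()/do_halt() are stand-ins for the kernel's primitives. */
#define _POSIX_C_SOURCE 199309L
#include <stdbool.h>
#include <stdint.h>
#include <time.h>

static bool need_resched(void) { return false; }   /* stand-in: pending work? */
static void do_halt(void)      { }                 /* stand-in: HLT -> VM exit */

static uint64_t now_ns(void)
{
    struct timespec ts;
    clock_gettime(CLOCK_MONOTONIC, &ts);
    return (uint64_t)ts.tv_sec * 1000000000ull + (uint64_t)ts.tv_nsec;
}

static void idle_poll_then_halt(uint64_t poll_ns)
{
    uint64_t deadline = now_ns() + poll_ns;

    while (now_ns() < deadline) {
        if (need_resched())
            return;    /* work arrived while polling: skip the halt */
    }
    do_halt();         /* nothing came in: give the CPU back to the host */
}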
2017 Sep 05
7
[PATCH 0/4] make virt_spin_lock() a pvops function
With virt_spin_lock() being a pvops function, the bare-metal case can be
optimized by patching the call away completely. When the kernel is running
as a guest, it can decide whether to use paravirtualized spinlocks, the
current fallback to the unfair test-and-set scheme, or to mimic the
bare-metal behavior.
Juergen Gross (4):
paravirt: add generic _paravirt_false() function
paravirt: switch
2017 Sep 05
0
[PATCH 3/4] paravirt: add virt_spin_lock pvops function
...86/include/asm/paravirt_types.h
>>>> @@ -319,6 +319,7 @@ struct pv_lock_ops {
>>>> void (*kick)(int cpu);
>>>>
>>>> struct paravirt_callee_save vcpu_is_preempted;
>>>> + struct paravirt_callee_save virt_spin_lock;
>>>> } __no_randomize_layout;
>>>>
>>>> /* This contains all the paravirt structures: we get a convenient
>>>> diff --git a/arch/x86/include/asm/qspinlock.h b/arch/x86/include/asm/qspinlock.h
>>>> index 48a706f641f2..fbd98896385c 100644
>>>> --- a/arch/x86/include/...
2017 Sep 05
0
[PATCH 3/4] paravirt: add virt_spin_lock pvops function
...clude/asm/paravirt_types.h
>> +++ b/arch/x86/include/asm/paravirt_types.h
>> @@ -319,6 +319,7 @@ struct pv_lock_ops {
>> void (*kick)(int cpu);
>>
>> struct paravirt_callee_save vcpu_is_preempted;
>> + struct paravirt_callee_save virt_spin_lock;
>> } __no_randomize_layout;
>>
>> /* This contains all the paravirt structures: we get a convenient
>> diff --git a/arch/x86/include/asm/qspinlock.h b/arch/x86/include/asm/qspinlock.h
>> index 48a706f641f2..fbd98896385c 100644
>> --- a/arch/x86/include/asm/qspinlock.h
>> +++ b/arch/x86/...
2018 Aug 10
13
[PATCH 00/10] x86/paravirt: several cleanups
This series removes some no-longer-needed parts of the paravirt
infrastructure and puts large quantities of paravirt ops under a new
config option, PARAVIRT_XXL, which is selected only by XEN_PV.
A pvops kernel without XEN_PV configured is about 2.5% smaller
with this series applied.
tip commit 5800dc5c19f34e6e03b5adab1282535cb102fafd ("x86/paravirt:
Fix spectre-v2 mitigations for
2017 Sep 05
0
[PATCH 3/4] paravirt: add virt_spin_lock pvops function
...pes.h
index 19efefc0e27e..928f5e7953a7 100644
--- a/arch/x86/include/asm/paravirt_types.h
+++ b/arch/x86/include/asm/paravirt_types.h
@@ -319,6 +319,7 @@ struct pv_lock_ops {
void (*kick)(int cpu);
struct paravirt_callee_save vcpu_is_preempted;
+ struct paravirt_callee_save virt_spin_lock;
} __no_randomize_layout;
/* This contains all the paravirt structures: we get a convenient
diff --git a/arch/x86/include/asm/qspinlock.h b/arch/x86/include/asm/qspinlock.h
index 48a706f641f2..fbd98896385c 100644
--- a/arch/x86/include/asm/qspinlock.h
+++ b/arch/x86/include/asm/qspinlock.h
@@ -17,6 +17,25 @@ static inli...
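For context, the "unfair test-and-set scheme" mentioned as the guest fallback behaves roughly like the standalone model below (illustrative C11 code, not the kernel's actual qspinlock implementation): the lock is taken by whichever CPU's compare-and-swap lands first, with no queueing.

/* Standalone model of an unfair test-and-set spinlock (illustrative C11,
 * not the kernel's qspinlock code): take the lock by atomically flipping
 * 0 -> 1, spinning until that succeeds. */
#include <stdatomic.h>

struct tas_lock { atomic_int val; };    /* 0 = unlocked, 1 = locked */

static void tas_lock_acquire(struct tas_lock *l)
{
    int expected = 0;

    while (!atomic_compare_exchange_weak(&l->val, &expected, 1)) {
        /* lost the race (or spurious failure): wait until the lock
         * looks free, then retry the atomic 0 -> 1 transition */
        while (atomic_load_explicit(&l->val, memory_order_relaxed) != 0)
            ;    /* cpu_relax() in kernel code */
        expected = 0;
    }
}

static void tas_lock_release(struct tas_lock *l)
{
    atomic_store_explicit(&l->val, 0, memory_order_release);
}

It is unfair because there is no FIFO ordering among waiters; under virtualization that can be preferable to queued spinning, since a waiter never ends up queued behind a vCPU that the host has preempted.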
2018 Feb 26
0
[v1 1/1] xen, mm: Allow deferred page initialization for xen pv domains
...include/asm/paravirt_types.h
> @@ -86,6 +86,9 @@ struct pv_init_ops {
> */
> unsigned (*patch)(u8 type, u16 clobber, void *insnbuf,
> unsigned long addr, unsigned len);
> +
> + /* called right after we finish boot allocator */
> + void (*after_bootmem)(void);
> } __no_randomize_layout;
>
>
> diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
> index 99dc79e76bdc..7b5f931e2e3a 100644
> --- a/arch/x86/kernel/paravirt.c
> +++ b/arch/x86/kernel/paravirt.c
> @@ -315,6 +315,7 @@ struct pv_info pv_info = {
>
> struct pv_init_ops pv_in...
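A minimal model of the hook being added here: pv_init_ops grows an after_bootmem callback with an empty native default, hypervisor-specific setup can install its own implementation, and the memory-init path invokes it once the boot allocator has been torn down (only pv_init_ops and after_bootmem come from the patch; the other names are illustrative):

/* Minimal model of the new pv_init_ops.after_bootmem hook: an empty native
 * default that hypervisor-specific setup can replace, invoked once the boot
 * allocator has been torn down.  Only pv_init_ops and after_bootmem come from
 * the patch; the other names are illustrative. */
struct pv_init_ops {
    /* ... patch() and friends elided ... */
    void (*after_bootmem)(void);    /* called right after the boot allocator is done */
};

static void native_after_bootmem(void) { /* nothing to do on bare metal */ }
static void xen_pv_after_bootmem(void)  { /* e.g. finish work deferred during early boot */ }

static struct pv_init_ops pv_init_ops = { .after_bootmem = native_after_bootmem };

static void mem_init_tail(void)
{
    /* ... boot allocator teardown (free_all_bootmem() equivalent) ... */
    pv_init_ops.after_bootmem();    /* the new hook point */
}

/* Xen PV setup would install its callback early on, e.g.:
 *     pv_init_ops.after_bootmem = xen_pv_after_bootmem;            */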
2018 Aug 13
0
[PATCH v2 02/11] x86/paravirt: remove clobbers parameter from paravirt patch functions
...asm/paravirt_types.h
@@ -84,7 +84,7 @@ struct pv_init_ops {
* the number of bytes of code generated, as we nop pad the
* rest in generic code.
*/
- unsigned (*patch)(u8 type, u16 clobber, void *insnbuf,
+ unsigned (*patch)(u8 type, void *insnbuf,
unsigned long addr, unsigned len);
} __no_randomize_layout;
@@ -370,14 +370,13 @@ extern struct pv_lock_ops pv_lock_ops;
unsigned paravirt_patch_ident_32(void *insnbuf, unsigned len);
unsigned paravirt_patch_ident_64(void *insnbuf, unsigned len);
-unsigned paravirt_patch_default(u8 type, u16 clobbers, void *insnbuf,
+unsigned paravirt_patch_default(u...
2018 Aug 13
11
[PATCH v2 00/11] x86/paravirt: several cleanups
This series removes some no-longer-needed parts of the paravirt
infrastructure and puts large quantities of paravirt ops under a new
config option, PARAVIRT_XXL, which is selected only by XEN_PV.
A pvops kernel without XEN_PV configured is about 2.5% smaller
with this series applied.
tip commit 5800dc5c19f34e6e03b5adab1282535cb102fafd ("x86/paravirt:
Fix spectre-v2 mitigations for