Search results for "lgs_change":
2017 Oct 11 · 1 · [PATCH v1 06/27] x86/entry/64: Adapt assembly for PIE support
...IRQs are on.
*/
- cmpq $.Lentry_SYSCALL_64_after_fastpath_call, (%rsp)
+ leaq .Lentry_SYSCALL_64_after_fastpath_call(%rip), %r11
+ cmpq %r11, (%rsp)
jne 1f
/*
@@ -1172,7 +1176,8 @@ ENTRY(error_entry)
movl %ecx, %eax /* zero extend */
cmpq %rax, RIP+8(%rsp)
je .Lbstep_iret
- cmpq $.Lgs_change, RIP+8(%rsp)
+ leaq .Lgs_change(%rip), %rcx
+ cmpq %rcx, RIP+8(%rsp)
jne .Lerror_entry_done
/*
@@ -1383,10 +1388,10 @@ ENTRY(nmi)
* resume the outer NMI.
*/
- movq $repeat_nmi, %rdx
+ leaq repeat_nmi(%rip), %rdx
cmpq 8(%rsp), %rdx
ja 1f
- movq $end_repeat_nmi, %rdx
+ leaq end_repe...
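For context, a minimal standalone sketch of why these hunks are needed (the label .Lsome_label is illustrative, not from the patch). With the kernel linked at a fixed address in the top 2GB, a symbol's absolute address fits a sign-extended 32-bit immediate and can be compared directly; a PIE kernel can be placed anywhere, so the address must be materialized RIP-relative first:

	/* Non-PIE: absolute address as a 32-bit immediate (R_X86_64_32S). */
	cmpq	$.Lsome_label, (%rsp)

	/* PIE: compute the address relative to %rip (R_X86_64_PC32),
	 * then compare through a scratch register. */
	leaq	.Lsome_label(%rip), %r11
	cmpq	%r11, (%rsp)

The cost is one extra instruction plus a scratch register per site, which is presumably why the hunks pick registers (%r11, %rcx, %rdx) that are free to clobber at those points.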
2017 Oct 20 · 0 · [PATCH v1 06/27] x86/entry/64: Adapt assembly for PIE support
...er_fastpath_call, (%rsp)
> + leaq .Lentry_SYSCALL_64_after_fastpath_call(%rip), %r11
> + cmpq %r11, (%rsp)
> jne 1f
>
> /*
> @@ -1172,7 +1176,8 @@ ENTRY(error_entry)
> movl %ecx, %eax /* zero extend */
> cmpq %rax, RIP+8(%rsp)
> je .Lbstep_iret
> - cmpq $.Lgs_change, RIP+8(%rsp)
> + leaq .Lgs_change(%rip), %rcx
> + cmpq %rcx, RIP+8(%rsp)
> jne .Lerror_entry_done
>
> /*
> @@ -1383,10 +1388,10 @@ ENTRY(nmi)
> * resume the outer NMI.
> */
>
> - movq $repeat_nmi, %rdx
> + leaq repeat_nmi(%rip), %rdx
> cmpq 8(%rsp...
2018 Mar 13 · 0 · [PATCH v2 06/27] x86/entry/64: Adapt assembly for PIE support
...tage trampoline.
*/
pushq %rdi
- movq $entry_SYSCALL_64_stage2, %rdi
+ movabsq $entry_SYSCALL_64_stage2, %rdi
JMP_NOSPEC %rdi
END(entry_SYSCALL_64_trampoline)
@@ -1275,7 +1275,8 @@ ENTRY(error_entry)
movl %ecx, %eax /* zero extend */
cmpq %rax, RIP+8(%rsp)
je .Lbstep_iret
- cmpq $.Lgs_change, RIP+8(%rsp)
+ leaq .Lgs_change(%rip), %rcx
+ cmpq %rcx, RIP+8(%rsp)
jne .Lerror_entry_done
/*
@@ -1480,10 +1481,10 @@ ENTRY(nmi)
* resume the outer NMI.
*/
- movq $repeat_nmi, %rdx
+ leaq repeat_nmi(%rip), %rdx
cmpq 8(%rsp), %rdx
ja 1f
- movq $end_repeat_nmi, %rdx
+ leaq end_repe...
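Note the first hunk uses movabsq rather than a RIP-relative leaq. A rough sketch of the distinction ($sym is a placeholder symbol):

	/* movq takes a sign-extended 32-bit immediate (R_X86_64_32S):
	 * it only works while the kernel lives in the top 2GB. */
	movq	$sym, %rdi

	/* movabsq carries a full 64-bit immediate (R_X86_64_64), which
	 * the kernel's relocation pass can patch to the final address. */
	movabsq	$sym, %rdi

A %rip-relative leaq is avoided at this particular site, presumably because the post-KPTI SYSCALL trampoline executes from an alias mapping of its page, so an address computed from %rip would not match the symbol's link-time location.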
2017 Oct 20 · 3 · [PATCH v1 06/27] x86/entry/64: Adapt assembly for PIE support
...cmpq %r11, (%rsp)
>> jne 1f
>>
>> /*
>> @@ -1172,7 +1176,8 @@ ENTRY(error_entry)
>> movl %ecx, %eax /* zero extend */
>> cmpq %rax, RIP+8(%rsp)
>> je .Lbstep_iret
>> - cmpq $.Lgs_change, RIP+8(%rsp)
>> + leaq .Lgs_change(%rip), %rcx
>> + cmpq %rcx, RIP+8(%rsp)
>> jne .Lerror_entry_done
>>
>> /*
>> @@ -1383,10 +1388,10 @@ ENTRY(nmi)
>> * resume the outer NMI.
>> */
>>
>> - mo...
2017 Oct 04 · 0 · [PATCH 09/13] x86/asm: Convert ALTERNATIVE*() assembler macros to preprocessor macros
...X86_FEATURE_XMM)
#else
pushl $do_simd_coprocessor_error
#endif
diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S
index c7c85724d7e0..49733c72619a 100644
--- a/arch/x86/entry/entry_64.S
+++ b/arch/x86/entry/entry_64.S
@@ -925,7 +925,7 @@ ENTRY(native_load_gs_index)
SWAPGS
.Lgs_change:
movl %edi, %gs
-2: ALTERNATIVE "", "mfence", X86_BUG_SWAPGS_FENCE
+2: ALTERNATIVE(, mfence, X86_BUG_SWAPGS_FENCE)
SWAPGS
popfq
FRAME_END
@@ -938,12 +938,8 @@ EXPORT_SYMBOL(native_load_gs_index)
/* running with kernelgs */
bad_gs:
SWAPGS /* switch back to user g...
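For context, a simplified sketch of what an ALTERNATIVE(orig, alt, feature) site means (the real macros record more metadata): "orig" is emitted inline, padded with nops to the length of the longer variant; "alt" is stashed in a separate replacement section; and the boot-time patcher copies "alt" over "orig" on CPUs where "feature" is set. For the hunk above, that resolves to:

	.Lgs_change:
	movl	%edi, %gs
2:	/* default CPUs: nop padding, nothing extra to execute */
	/* CPUs flagged X86_BUG_SWAPGS_FENCE: patched to an mfence that
	 * orders the %gs write against the SWAPGS that follows */
	SWAPGS

The conversion in the hunk itself is purely syntactic: the GAS .macro form needs its code arguments as quoted strings, while the preprocessor form takes bare parenthesized arguments, presumably so the same definitions can also be shared with C-side macros.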
2017 Oct 11 · 32 · [PATCH v1 00/27] x86: PIE support and option to extend KASLR randomization
Changes:
 - patch v1:
   - Simplify ftrace implementation.
   - Use gcc -mstack-protector-guard-reg=%gs with PIE when possible.
 - rfc v3:
   - Use --emit-relocs instead of -pie to reduce dynamic relocation space on
     mapped memory. It also simplifies the relocation process.
   - Move the start of the module section next to the kernel. Remove the need
     for -mcmodel=large on modules. Extends
2018 Mar 13 · 32 · [PATCH v2 00/27] x86: PIE support and option to extend KASLR randomization
Changes:
 - patch v2:
   - Adapt patch to work post-KPTI and compiler changes.
   - Redo all performance testing with latest configs and compilers.
   - Simplify mov macro on PIE (MOVABS now).
   - Reduce GOT footprint.
 - patch v1:
   - Simplify ftrace implementation.
   - Use gcc -mstack-protector-guard-reg=%gs with PIE when possible.
 - rfc v3:
   - Use --emit-relocs instead of -pie to reduce
2017 Oct 04 · 28 · x86: PIE support and option to extend KASLR randomization
These patches make the changes necessary to build the kernel as a Position
Independent Executable (PIE) on x86_64. A PIE kernel can be relocated below
the top 2G of the virtual address space, which allows optionally extending
the KASLR randomization range from 1G to 3G.
Thanks a lot to Ard Biesheuvel & Kees Cook for their feedback on compiler
changes, PIE support and KASLR in general. Thanks to
2018 May 23 · 33 · [PATCH v3 00/27] x86: PIE support and option to extend KASLR randomization
Changes:
 - patch v3:
   - Update message to describe the longer-term PIE goal.
   - Minor change to the ftrace if condition.
   - Changed code using xchgq.
 - patch v2:
   - Adapt patch to work post-KPTI and compiler changes.
   - Redo all performance testing with latest configs and compilers.
   - Simplify mov macro on PIE (MOVABS now).
   - Reduce GOT footprint.
 - patch v1:
   - Simplify ftrace
2017 Oct 04 · 31 · [PATCH 00/13] x86/paravirt: Make pv ops code generation more closely match reality
This changes the pv ops code generation to more closely match reality.
For example, instead of:

	callq *0xffffffff81e3a400 (pv_irq_ops.save_fl)

vmlinux will now show:

	pushfq
	pop %rax
	nop
	nop
	nop
	nop
	nop

which is what the runtime version of the code will show in most cases.
This idea was suggested by Andy Lutomirski.
The benefits are:
 - For the most common runtime cases
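A sketch of that replacement at the byte level (PV_SAVE_FL is a placeholder offset, and the exact call-site form may differ):

	/* Before: a 7-byte indirect call through the pv_irq_ops table. */
	callq	*pv_irq_ops+PV_SAVE_FL	/* ff 14 25 xx xx xx xx */

	/* After patching for the native case: the real operation plus
	 * nop padding that preserves the 7-byte site length. */
	pushfq				/* 9c: push RFLAGS            */
	pop	%rax			/* 58: flags returned in %rax */
	nop; nop; nop; nop; nop		/* five 1-byte pads           */

Keeping every call site the same size means the surrounding code never moves, so a site can later be re-patched in place (e.g. for a paravirtualized guest).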