Displaying 10 results for "repeat_nmi".
2017 Oct 11
1
[PATCH v1 06/27] x86/entry/64: Adapt assembly for PIE support
...ENTRY(error_entry)
movl %ecx, %eax /* zero extend */
cmpq %rax, RIP+8(%rsp)
je .Lbstep_iret
- cmpq $.Lgs_change, RIP+8(%rsp)
+ leaq .Lgs_change(%rip), %rcx
+ cmpq %rcx, RIP+8(%rsp)
jne .Lerror_entry_done
/*
@@ -1383,10 +1388,10 @@ ENTRY(nmi)
* resume the outer NMI.
*/
- movq $repeat_nmi, %rdx
+ leaq repeat_nmi(%rip), %rdx
cmpq 8(%rsp), %rdx
ja 1f
- movq $end_repeat_nmi, %rdx
+ leaq end_repeat_nmi(%rip), %rdx
cmpq 8(%rsp), %rdx
ja nested_nmi_out
1:
@@ -1440,7 +1445,8 @@ nested_nmi:
pushq %rdx
pushfq
pushq $__KERNEL_CS
- pushq $repeat_nmi
+ leaq repeat_nmi(%rip), %rdx...
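The change repeated in every hunk is the heart of the PIE adaptation: movq $sym, %reg and cmpq $sym, mem encode the symbol's absolute address as a sign-extended 32-bit immediate, valid only while the kernel is linked into the top 2G of the address space (mcmodel=kernel). leaq sym(%rip), %reg computes the same address relative to the current instruction pointer, so it stays correct wherever a position-independent kernel is loaded. A minimal standalone sketch of the transformation (labels and register choice are illustrative, not the kernel's code):

	.text
target:
	ret
demo:
	/* Old absolute form, sign-extended imm32, top-2G only:
	 *	movq	$target, %rdx
	 * PIE form, address computed from the current %rip: */
	leaq	target(%rip), %rdx
	cmpq	8(%rsp), %rdx		/* the compares themselves are unchanged */
	ret

Where the absolute address was an immediate operand of another instruction, as in the cmpq $.Lgs_change hunk, the PIE form costs one extra instruction and a scratch register (%rcx there) to materialize the address first.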
2017 Oct 20
0
[PATCH v1 06/27] x86/entry/64: Adapt assembly for PIE support
...cmpq %rax, RIP+8(%rsp)
> je .Lbstep_iret
> - cmpq $.Lgs_change, RIP+8(%rsp)
> + leaq .Lgs_change(%rip), %rcx
> + cmpq %rcx, RIP+8(%rsp)
> jne .Lerror_entry_done
>
> /*
> @@ -1383,10 +1388,10 @@ ENTRY(nmi)
> * resume the outer NMI.
> */
>
> - movq $repeat_nmi, %rdx
> + leaq repeat_nmi(%rip), %rdx
> cmpq 8(%rsp), %rdx
> ja 1f
> - movq $end_repeat_nmi, %rdx
> + leaq end_repeat_nmi(%rip), %rdx
> cmpq 8(%rsp), %rdx
> ja nested_nmi_out
> 1:
> @@ -1440,7 +1445,8 @@ nested_nmi:
> pushq %rdx
> pushfq
> pushq $_...
2018 Mar 13
0
[PATCH v2 06/27] x86/entry/64: Adapt assembly for PIE support
...ENTRY(error_entry)
movl %ecx, %eax /* zero extend */
cmpq %rax, RIP+8(%rsp)
je .Lbstep_iret
- cmpq $.Lgs_change, RIP+8(%rsp)
+ leaq .Lgs_change(%rip), %rcx
+ cmpq %rcx, RIP+8(%rsp)
jne .Lerror_entry_done
/*
@@ -1480,10 +1481,10 @@ ENTRY(nmi)
* resume the outer NMI.
*/
- movq $repeat_nmi, %rdx
+ leaq repeat_nmi(%rip), %rdx
cmpq 8(%rsp), %rdx
ja 1f
- movq $end_repeat_nmi, %rdx
+ leaq end_repeat_nmi(%rip), %rdx
cmpq 8(%rsp), %rdx
ja nested_nmi_out
1:
@@ -1537,7 +1538,8 @@ nested_nmi:
pushq %rdx
pushfq
pushq $__KERNEL_CS
- pushq $repeat_nmi
+ leaq repeat_nmi(%rip), %rdx...
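Both the v1 and v2 excerpts cut off mid-hunk in nested_nmi, just after leaq repeat_nmi(%rip), %rdx. A scratch register is needed here because pushq $repeat_nmi pushes a sign-extended 32-bit absolute immediate, and no push takes a RIP-relative address: pushq repeat_nmi(%rip) would push the eight bytes stored at repeat_nmi, not its address. The address therefore has to be materialized in a register first; %rdx is safe to clobber in this frame only because its live value was pushed a few instructions earlier. A standalone sketch of the pattern (the label is a stand-in for the demo, not the kernel's symbol):

	.text
repeat_nmi_demo:
	ret
build_rip_slot:
	leaq	repeat_nmi_demo(%rip), %rdx	/* materialize the address */
	pushq	%rdx				/* RIP slot of the iret frame */
	addq	$8, %rsp			/* demo only: discard the slot */
	ret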
2017 Oct 20
3
[PATCH v1 06/27] x86/entry/64: Adapt assembly for PIE support
...(%rsp)
>> + leaq .Lgs_change(%rip), %rcx
>> + cmpq %rcx, RIP+8(%rsp)
>> jne .Lerror_entry_done
>>
>> /*
>> @@ -1383,10 +1388,10 @@ ENTRY(nmi)
>> * resume the outer NMI.
>> */
>>
>> - movq $repeat_nmi, %rdx
>> + leaq repeat_nmi(%rip), %rdx
>> cmpq 8(%rsp), %rdx
>> ja 1f
>> - movq $end_repeat_nmi, %rdx
>> + leaq end_repeat_nmi(%rip), %rdx
>> cmpq 8(%rsp), %rdx
>> ja nested_nmi_out
>> 1:
...
2018 Mar 14
0
[PATCH v2 06/27] x86/entry/64: Adapt assembly for PIE support
...pushq $1f /* RIP */
> > > + pushq %rax /* Support Position Independent Code */
> > > + leaq 1f(%rip), %rax /* RIP */
> > > + xchgq %rax, (%rsp) /* Restore RAX, put 1f */
> > > iretq /* continues at repeat_nmi below */
> > > UNWIND_HINT_IRET_REGS
> > > 1:
> >
> > Urgh, xchg with a memop has an implicit LOCK prefix.
> this_cpu_xchg uses cmpxchg without the LOCK prefix as a replacement to reduce latency.
Great, I will update my implementation.
Thanks Peter and Christoph.
> Fr...
2018 Mar 15
0
[PATCH v2 06/27] x86/entry/64: Adapt assembly for PIE support
On 14/03/2018 16:54, Christopher Lameter wrote:
>>> + pushq %rax /* Support Position Independent Code */
>>> + leaq 1f(%rip), %rax /* RIP */
>>> + xchgq %rax, (%rsp) /* Restore RAX, put 1f */
>>> iretq /* continues at repeat_nmi below */
>>> UNWIND_HINT_IRET_REGS
>>> 1:
>> Urgh, xchg with a memop has an implicit LOCK prefix.
> this_cpu_xchg uses cmpxchg without the LOCK prefix as a replacement to reduce latency.
That requires using a second register, since %rax is used as the
comparison source. At this poin...
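The objection in the two messages above: xchgq %rax, (%rsp) is attractive because it restores %rax and installs the new return RIP in one instruction, but xchg with a memory operand always carries an implicit LOCK prefix, a needless expense on this path. The v3 changelog further down records only "Changed code using xchgq"; one LOCK-free way to build the same iret frame, assuming a scratch register is dead at that point (the register choice and selector values below are illustrative assumptions, not the actual v3 code):

	.text
	.set	__KERNEL_CS, 0x10	/* illustrative selector values */
	.set	__KERNEL_DS, 0x18
return_via_iret:
	pushq	$__KERNEL_DS		/* SS */
	leaq	8(%rsp), %rdx
	pushq	%rdx			/* RSP value from before the SS push */
	pushfq				/* RFLAGS */
	pushq	$__KERNEL_CS		/* CS */
	leaq	1f(%rip), %rdx		/* PIE-safe address of label 1f */
	pushq	%rdx			/* RIP: plain push, no implicit LOCK */
	iretq				/* 64-bit iret pops all five slots; resumes at 1f */
1:
	ret

If no register is dead at that point (the reply above notes %rax is still in use), the quoted xchgq sequence remains the compact fallback, at the price of the LOCK'd memory access.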
2017 Oct 11
32
[PATCH v1 00/27] x86: PIE support and option to extend KASLR randomization
Changes:
- patch v1:
- Simplify ftrace implementation.
- Use gcc -mstack-protector-guard-reg=%gs with PIE when possible.
- rfc v3:
- Use --emit-relocs instead of -pie to reduce dynamic relocation space on
mapped memory. It also simplifies the relocation process.
- Move the start of the module section next to the kernel. Remove the need for
-mcmodel=large on modules. Extends
2018 Mar 13
32
[PATCH v2 00/27] x86: PIE support and option to extend KASLR randomization
Changes:
- patch v2:
- Adapt patch to work post KPTI and compiler changes
- Redo all performance testing with latest configs and compilers
- Simplify mov macro on PIE (MOVABS now)
- Reduce GOT footprint
- patch v1:
- Simplify ftrace implementation.
- Use gcc -mstack-protector-guard-reg=%gs with PIE when possible.
- rfc v3:
- Use --emit-relocs instead of -pie to reduce
2017 Oct 04
28
x86: PIE support and option to extend KASLR randomization
These patches make the changes necessary to build the kernel as Position
Independent Executable (PIE) on x86_64. A PIE kernel can be relocated below
the top 2G of the virtual address space, which allows optionally extending the
KASLR randomization range from 1G to 3G.
Thanks a lot to Ard Biesheuvel & Kees Cook for their feedback on compiler
changes, PIE support and KASLR in general. Thanks to
2018 May 23
33
[PATCH v3 00/27] x86: PIE support and option to extend KASLR randomization
Changes:
- patch v3:
- Update commit message to describe the longer-term PIE goal.
- Minor change on ftrace if condition.
- Changed code using xchgq.
- patch v2:
- Adapt patch to work post KPTI and compiler changes
- Redo all performance testing with latest configs and compilers
- Simplify mov macro on PIE (MOVABS now)
- Reduce GOT footprint
- patch v1:
- Simplify ftrace