Displaying 20 results from an estimated 29 matches for "restore_all".
2012 Oct 02
18
[PATCH 0/3] x86: adjust entry frame generation
This set of patches converts the way frames gets created from
using PUSHes/POPs to using MOVes, thus allowing (in certain
cases) to avoid saving/restoring part of the register set.
While the place where the (small) win from this comes from varies
between CPUs, the net effect is a 1 to 2% reduction on a
combined interruption entry and exit when the full state save
can be avoided.
1: use MOV
2006 Sep 26
5
powernow-k8: Processor cpuid 681 not supported
...hen I try to create a domU
BUG: warning at kernel/lockdep.c:1814/trace_hardirqs_on() (Not tainted)
[<c040566e>] show_trace_log_lvl+0x58/0x177
[<c0405c73>] show_trace+0xd/0x10
[<c0405cb1>] dump_stack+0x19/0x1b
[<c043646e>] trace_hardirqs_on+0xa4/0x120
[<c0404e67>] restore_all+0x37/0x3a
DWARF2 unwinder stuck at restore_all+0x37/0x3a
Leftover inexact backtrace:
Inexact backtrace:
[<c0405c73>] show_trace+0xd/0x10
[<c0405cb1>] dump_stack+0x19/0x1b
[<c043646e>] trace_hardirqs_on+0xa4/0x120
[<c0404e67>] restore_all+0x37/0x3a
any help ?
---...
2006 Sep 26
5
powernow-k8: Processor cpuid 681 not supported
...hen I try to create a domU
BUG: warning at kernel/lockdep.c:1814/trace_hardirqs_on() (Not tainted)
[<c040566e>] show_trace_log_lvl+0x58/0x177
[<c0405c73>] show_trace+0xd/0x10
[<c0405cb1>] dump_stack+0x19/0x1b
[<c043646e>] trace_hardirqs_on+0xa4/0x120
[<c0404e67>] restore_all+0x37/0x3a
DWARF2 unwinder stuck at restore_all+0x37/0x3a
Leftover inexact backtrace:
Inexact backtrace:
[<c0405c73>] show_trace+0xd/0x10
[<c0405cb1>] dump_stack+0x19/0x1b
[<c043646e>] trace_hardirqs_on+0xa4/0x120
[<c0404e67>] restore_all+0x37/0x3a
any help ?
---...
2007 Apr 18
2
[patch 3/8] Allow a kernel to not be in ring 0.
...MASK | 3), %eax
- jz resume_kernel
+ andl $(VM_MASK | SEGMENT_RPL_MASK), %eax
+ cmpl $USER_RPL, %eax
+ jb resume_kernel # not returning to v8086 or userspace
ENTRY(resume_userspace)
cli # make sure we don't miss an interrupt
# setting need_resched or sigpending
@@ -367,8 +368,8 @@ restore_all:
# See comments in process.c:copy_thread() for details.
movb OLDSS(%esp), %ah
movb CS(%esp), %al
- andl $(VM_MASK | (4 << 8) | 3), %eax
- cmpl $((4 << 8) | 3), %eax
+ andl $(VM_MASK | (4 << 8) | SEGMENT_RPL_MASK), %eax
+ cmpl $((4 << 8) | USER_RPL), %eax
CFI_REMEMBER...
2007 Apr 18
2
[patch 3/8] Allow a kernel to not be in ring 0.
...MASK | 3), %eax
- jz resume_kernel
+ andl $(VM_MASK | SEGMENT_RPL_MASK), %eax
+ cmpl $USER_RPL, %eax
+ jb resume_kernel # not returning to v8086 or userspace
ENTRY(resume_userspace)
cli # make sure we don't miss an interrupt
# setting need_resched or sigpending
@@ -367,8 +368,8 @@ restore_all:
# See comments in process.c:copy_thread() for details.
movb OLDSS(%esp), %ah
movb CS(%esp), %al
- andl $(VM_MASK | (4 << 8) | 3), %eax
- cmpl $((4 << 8) | 3), %eax
+ andl $(VM_MASK | (4 << 8) | SEGMENT_RPL_MASK), %eax
+ cmpl $((4 << 8) | USER_RPL), %eax
CFI_REMEMBER...
2007 Apr 18
0
[PATCH 17/21] i386 Ldt cleanups 1
...-work/arch/i386/kernel/entry.S
===================================================================
--- linux-2.6.14-zach-work.orig/arch/i386/kernel/entry.S 2005-10-27 17:02:08.000000000 -0700
+++ linux-2.6.14-zach-work/arch/i386/kernel/entry.S 2005-11-04 18:22:07.000000000 -0800
@@ -250,8 +250,8 @@ restore_all:
# See comments in process.c:copy_thread() for details.
movb OLDSS(%esp), %ah
movb CS(%esp), %al
- andl $(VM_MASK | (4 << 8) | 3), %eax
- cmpl $((4 << 8) | 3), %eax
+ andl $(VM_MASK | (LDT_SEGMENT << 8) | 3), %eax
+ cmpl $((LDT_SEGMENT << 8) | 3), %eax
je ldt_ss #...
2007 May 16
0
FW: trap/interrupt gate for hypercall
..., forget to join lists.
Regards,
Jinsong
-----Original Message-----
From: Liu, Jinsong
Sent: 2007年5月16日 8:23
To: 'Keir Fraser'; Tian, Kevin; xen-devel@lists.xensource.com
Subject: RE: [Xen-devel] trap/interrupt gate for hypercall
In native linux, syscall (gate type 15) and its RESTORE_ALL code provides weak fixup mechanism when reload segment register, by sending 0 selector to ds/es/gs, or kill the process when iret meets problem. Native linux can pay the price with this way since it doesn't matter when a ring3 process meet problem (and the possibility of doing so is very l...
2007 Apr 18
0
[PATCH 17/21] i386 Ldt cleanups 1
...-work/arch/i386/kernel/entry.S
===================================================================
--- linux-2.6.14-zach-work.orig/arch/i386/kernel/entry.S 2005-10-27 17:02:08.000000000 -0700
+++ linux-2.6.14-zach-work/arch/i386/kernel/entry.S 2005-11-04 18:22:07.000000000 -0800
@@ -250,8 +250,8 @@ restore_all:
# See comments in process.c:copy_thread() for details.
movb OLDSS(%esp), %ah
movb CS(%esp), %al
- andl $(VM_MASK | (4 << 8) | 3), %eax
- cmpl $((4 << 8) | 3), %eax
+ andl $(VM_MASK | (LDT_SEGMENT << 8) | 3), %eax
+ cmpl $((LDT_SEGMENT << 8) | 3), %eax
je ldt_ss #...
2014 Nov 17
0
kworker/u16:57: page allocation failure: order:0, mode:0x284000
...stack+0x48/0x60
[<c056f3d4>] warn_alloc_failed+0xd4/0x110
[<c0571ade>] __alloc_pages_nodemask+0x81e/0xc30
[<c04e4b65>] ? clockevents_program_event+0x45/0x150
[<c05b81c8>] new_slab+0x258/0x3a0
[<c05b9490>] __slab_alloc.constprop.55+0x5f0/0x790
[<c0b34636>] ? restore_all+0xf/0xf
[<c0755622>] ? radix_tree_node_alloc+0x22/0x90
[<c04a9728>] ? __lock_is_held+0x48/0x70
[<c05bac55>] kmem_cache_alloc+0x295/0x3c0
[<c0755622>] ? radix_tree_node_alloc+0x22/0x90
[<c0755622>] radix_tree_node_alloc+0x22/0x90
[<c0755e7c>] __radix_tree_cr...
2007 Apr 18
1
[PATCH] Slight cleanups for x86 ring macros (against rc3-mm2)
...FLAGS and CS
movb CS(%esp), %al
andl $(VM_MASK | SEGMENT_RPL_MASK), %eax
- cmpl $SEGMENT_RPL_MASK, %eax
+ cmpl $USER_RPL, %eax
jb resume_kernel # not returning to v8086 or userspace
ENTRY(resume_userspace)
DISABLE_INTERRUPTS # make sure we don't miss an interrupt
@@ -374,8 +374,8 @@ restore_all:
# See comments in process.c:copy_thread() for details.
movb OLDSS(%esp), %ah
movb CS(%esp), %al
- andl $(VM_MASK | (4 << 8) | 3), %eax
- cmpl $((4 << 8) | 3), %eax
+ andl $(VM_MASK | (SEGMENT_TI_MASK << 8) | SEGMENT_RPL_MASK), %eax
+ cmpl $((SEGMENT_LDT << 8) | USER_R...
2007 Apr 18
1
[PATCH] Slight cleanups for x86 ring macros (against rc3-mm2)
...FLAGS and CS
movb CS(%esp), %al
andl $(VM_MASK | SEGMENT_RPL_MASK), %eax
- cmpl $SEGMENT_RPL_MASK, %eax
+ cmpl $USER_RPL, %eax
jb resume_kernel # not returning to v8086 or userspace
ENTRY(resume_userspace)
DISABLE_INTERRUPTS # make sure we don't miss an interrupt
@@ -374,8 +374,8 @@ restore_all:
# See comments in process.c:copy_thread() for details.
movb OLDSS(%esp), %ah
movb CS(%esp), %al
- andl $(VM_MASK | (4 << 8) | 3), %eax
- cmpl $((4 << 8) | 3), %eax
+ andl $(VM_MASK | (SEGMENT_TI_MASK << 8) | SEGMENT_RPL_MASK), %eax
+ cmpl $((SEGMENT_LDT << 8) | USER_R...
2007 Apr 18
1
[PATCH 1/2] Transparent entry.S IRQ holdoff handling
...eax,EAX(%esp) # store the return value
syscall_exit:
- cli # make sure we don't miss an interrupt
+ CLI # make sure we don't miss an interrupt
# setting need_resched or sigpending
# between sampling and the iret
#ifdef CONFIG_TRAP_BAD_SYSCALL_EXITS
@@ -274,10 +273,10 @@ restore_all:
restore_nocheck:
RESTORE_REGS
addl $4, %esp
-1: iret
+1: IRET
.section .fixup,"ax"
iret_exc:
- sti
+ STI
pushl $0 # no error code
pushl $do_iret_error
jmp error_code
@@ -299,14 +298,14 @@ ldt_ss:
* CPUs, which we can try to work around to make
* dosemu and wine happy...
2007 Apr 18
1
[PATCH 1/2] Transparent entry.S IRQ holdoff handling
...eax,EAX(%esp) # store the return value
syscall_exit:
- cli # make sure we don't miss an interrupt
+ CLI # make sure we don't miss an interrupt
# setting need_resched or sigpending
# between sampling and the iret
#ifdef CONFIG_TRAP_BAD_SYSCALL_EXITS
@@ -274,10 +273,10 @@ restore_all:
restore_nocheck:
RESTORE_REGS
addl $4, %esp
-1: iret
+1: IRET
.section .fixup,"ax"
iret_exc:
- sti
+ STI
pushl $0 # no error code
pushl $do_iret_error
jmp error_code
@@ -299,14 +298,14 @@ ldt_ss:
* CPUs, which we can try to work around to make
* dosemu and wine happy...
2012 Nov 22
41
[PATCH V3] vmx/nmi: Do not use self_nmi() in VMEXIT handler
The self_nmi() code causes an NMI to be triggered by sending an APIC
message to the local processor. However, NMIs are blocked by the
VMEXIT, until the next iret or VMENTER.
Volume 3 Chapter 27 Section 1 of the Intel SDM states:
An NMI causes subsequent NMIs to be blocked, but only after the VM exit
completes.
As a result, as soon as the VMENTER happens, an immediate VMEXIT
happens
2007 Apr 18
1
[RFC, PATCH 8/24] i386 Vmi syscall assembly
...)
movl %eax,EAX(%esp) # store the return value
syscall_exit:
- cli # make sure we don't miss an interrupt
+ CLI # make sure we don't miss an interrupt
# setting need_resched or sigpending
# between sampling and the iret
movl TI_flags(%ebp), %ecx
@@ -256,14 +256,14 @@ restore_all:
restore_nocheck:
RESTORE_REGS
addl $4, %esp
-1: iret
-.section .fixup,"ax"
+1: IRET
+.pushsection .fixup,"ax"
iret_exc:
- sti
+ STI
pushl $0 # no error code
pushl $do_iret_error
jmp error_code
-.previous
+.popsection
.section __ex_table,"a"
.align 4...
2007 Apr 18
1
[RFC, PATCH 8/24] i386 Vmi syscall assembly
...)
movl %eax,EAX(%esp) # store the return value
syscall_exit:
- cli # make sure we don't miss an interrupt
+ CLI # make sure we don't miss an interrupt
# setting need_resched or sigpending
# between sampling and the iret
movl TI_flags(%ebp), %ecx
@@ -256,14 +256,14 @@ restore_all:
restore_nocheck:
RESTORE_REGS
addl $4, %esp
-1: iret
-.section .fixup,"ax"
+1: IRET
+.pushsection .fixup,"ax"
iret_exc:
- sti
+ STI
pushl $0 # no error code
pushl $do_iret_error
jmp error_code
-.previous
+.popsection
.section __ex_table,"a"
.align 4...
2017 Oct 04
0
[PATCH 09/13] x86/asm: Convert ALTERNATIVE*() assembler macros to preprocessor macros
...t;, \
- "jmp .Lsyscall_32_done", X86_FEATURE_XENPV
+ #define JMP_IF_IRET testl %eax, %eax; jz .Lsyscall_32_done
+ ALTERNATIVE(JMP_IF_IRET, jmp .Lsyscall_32_done, X86_FEATURE_XENPV)
/* Opportunistic SYSEXIT */
TRACE_IRQS_ON /* User mode traces as IRQs on. */
@@ -536,7 +536,7 @@ restore_all:
TRACE_IRQS_IRET
.Lrestore_all_notrace:
#ifdef CONFIG_X86_ESPFIX32
- ALTERNATIVE "jmp .Lrestore_nocheck", "", X86_BUG_ESPFIX
+ ALTERNATIVE(jmp .Lrestore_nocheck, , X86_BUG_ESPFIX)
movl PT_EFLAGS(%esp), %eax # mix EFLAGS, SS and CS
/*
@@ -692,9 +692,9 @@ ENTRY(simd_cop...
2007 Apr 18
0
[RFC/PATCH PV_OPS X86_64 12/17] paravirt_ops - interrupt/exception changes
...t
addq $EXCEPTION_STKSZ, per_cpu__init_tss + TSS_ist + (\ist - 1) * 8(%rbp)
.endif
- cli
+ DISABLE_INTERRUPTS(CLBR_NONE)
.if \irqtrace
TRACE_IRQS_OFF
.endif
@@ -767,10 +784,10 @@ paranoid_swapgs\trace:
.if \trace
TRACE_IRQS_IRETQ 0
.endif
- swapgs
+ SWAPGS
paranoid_restore\trace:
RESTORE_ALL 8
- iretq
+ INTERRUPT_RETURN
paranoid_userspace\trace:
GET_THREAD_INFO(%rcx)
movl threadinfo_flags(%rcx),%ebx
@@ -785,11 +802,11 @@ paranoid_userspace\trace:
.if \trace
TRACE_IRQS_ON
.endif
- sti
+ ENABLE_INTERRUPTS(CLBR_NONE)
xorl %esi,%esi /* arg2: oldset */
movq %rsp,%rdi /*...
2007 Apr 18
0
[RFC/PATCH PV_OPS X86_64 12/17] paravirt_ops - interrupt/exception changes
...t
addq $EXCEPTION_STKSZ, per_cpu__init_tss + TSS_ist + (\ist - 1) * 8(%rbp)
.endif
- cli
+ DISABLE_INTERRUPTS(CLBR_NONE)
.if \irqtrace
TRACE_IRQS_OFF
.endif
@@ -767,10 +784,10 @@ paranoid_swapgs\trace:
.if \trace
TRACE_IRQS_IRETQ 0
.endif
- swapgs
+ SWAPGS
paranoid_restore\trace:
RESTORE_ALL 8
- iretq
+ INTERRUPT_RETURN
paranoid_userspace\trace:
GET_THREAD_INFO(%rcx)
movl threadinfo_flags(%rcx),%ebx
@@ -785,11 +802,11 @@ paranoid_userspace\trace:
.if \trace
TRACE_IRQS_ON
.endif
- sti
+ ENABLE_INTERRUPTS(CLBR_NONE)
xorl %esi,%esi /* arg2: oldset */
movq %rsp,%rdi /*...
2007 Apr 18
0
[PATCH] paravirt_ops x86_64 , take 2
...@@ -738,7 +733,7 @@ END(spurious_interrupt)
.if \ist
addq $EXCEPTION_STKSZ, per_cpu__init_tss + TSS_ist + (\ist - 1) * 8(%rbp)
.endif
- cli
+ DISABLE_INTERRUPTS(CLBR_NONE)
.if \irqtrace
TRACE_IRQS_OFF
.endif
@@ -770,7 +765,7 @@ paranoid_swapgs\trace:
swapgs
paranoid_restore\trace:
RESTORE_ALL 8
- iretq
+ INTERRUPT_RETURN
paranoid_userspace\trace:
GET_THREAD_INFO(%rcx)
movl threadinfo_flags(%rcx),%ebx
@@ -785,11 +780,11 @@ paranoid_userspace\trace:
.if \trace
TRACE_IRQS_ON
.endif
- sti
+ ENABLE_INTERRUPTS(CLBR_NONE)
xorl %esi,%esi /* arg2: oldset */
movq %rsp,%rdi /*...