Displaying 20 results from an estimated 37 matches for "restore_nocheck".
2007 Apr 18
1
[PATCH 1/2] Transparent entry.S IRQ holdoff handling
...overflow, but at least
* we know it happened and should be able to track it down.
*/
@@ -88,7 +89,7 @@ NT_MASK = 0x00004000
VM_MASK = 0x00020000
#ifdef CONFIG_PREEMPT
-#define preempt_stop cli
+#define preempt_stop CLI
#else
#define preempt_stop
#define resume_kernel restore_nocheck
@@ -161,7 +162,7 @@ ret_from_intr:
testl $(VM_MASK | 3), %eax
jz resume_kernel
ENTRY(resume_userspace)
- cli # make sure we don't miss an interrupt
+ CLI # make sure we don't miss an interrupt
# setting need_resched or sigpending
# between sampling and the iret
mo...
2007 Apr 18
1
[RFC, PATCH 8/24] i386 Vmi syscall assembly
...e <mach_asm.h>
#include "irq_vectors.h"
#define nr_syscalls ((syscall_table_size)/4)
@@ -76,7 +77,7 @@ NT_MASK = 0x00004000
VM_MASK = 0x00020000
#ifdef CONFIG_PREEMPT
-#define preempt_stop cli
+#define preempt_stop CLI
#else
#define preempt_stop
#define resume_kernel restore_nocheck
@@ -148,7 +149,7 @@ ret_from_intr:
testl $(VM_MASK | 3), %eax
jz resume_kernel
ENTRY(resume_userspace)
- cli # make sure we don't miss an interrupt
+ CLI # make sure we don't miss an interrupt
# setting need_resched or sigpending
# between sampling and the iret
mo...
2007 Apr 18
2
[patch 3/8] Allow a kernel to not be in ring 0.
...movb OLDSS(%esp), %ah
movb CS(%esp), %al
- andl $(VM_MASK | (4 << 8) | 3), %eax
- cmpl $((4 << 8) | 3), %eax
+ andl $(VM_MASK | (4 << 8) | SEGMENT_RPL_MASK), %eax
+ cmpl $((4 << 8) | USER_RPL), %eax
CFI_REMEMBER_STATE
je ldt_ss # returning to user-space with LDT SS
restore_nocheck:
--- 2.6.18-rc3-32.orig/arch/i386/kernel/process.c
+++ 2.6.18-rc3-32/arch/i386/kernel/process.c
@@ -346,7 +346,7 @@ int kernel_thread(int (*fn)(void *), voi
regs.xes = __USER_DS;
regs.orig_eax = -1;
regs.eip = (unsigned long) kernel_thread_helper;
- regs.xcs = __KERNEL_CS;
+ regs.xcs = __KERN...
2007 Apr 18
0
[PATCH 17/21] i386 Ldt cleanups 1
...thread() for details.
movb OLDSS(%esp), %ah
movb CS(%esp), %al
- andl $(VM_MASK | (4 << 8) | 3), %eax
- cmpl $((4 << 8) | 3), %eax
+ andl $(VM_MASK | (LDT_SEGMENT << 8) | 3), %eax
+ cmpl $((LDT_SEGMENT << 8) | 3), %eax
je ldt_ss # returning to user-space with LDT SS
restore_nocheck:
RESTORE_REGS
Index: linux-2.6.14-zach-work/arch/i386/kernel/ptrace.c
===================================================================
--- linux-2.6.14-zach-work.orig/arch/i386/kernel/ptrace.c 2005-11-04 18:30:27.000000000 -0800
+++ linux-2.6.14-zach-work/arch/i386/kernel/ptrace.c 2005-11-05 0...
2007 Apr 18
1
[PATCH 6/14] i386 / Add some segment convenience functions
...thread() for details.
movb OLDSS(%esp), %ah
movb CS(%esp), %al
- andl $(VM_MASK | (4 << 8) | 3), %eax
- cmpl $((4 << 8) | 3), %eax
+ andl $(VM_MASK | (LDT_SEGMENT << 8) | 3), %eax
+ cmpl $((LDT_SEGMENT << 8) | 3), %eax
je ldt_ss # returning to user-space with LDT SS
restore_nocheck:
RESTORE_REGS
Index: linux-2.6.13/arch/i386/mm/fault.c
===================================================================
--- linux-2.6.13.orig/arch/i386/mm/fault.c 2005-08-09 19:43:38.000000000 -0700
+++ linux-2.6.13/arch/i386/mm/fault.c 2005-08-10 20:42:20.000000000 -0700
@@ -101,7 +101,7 @@...
2007 Apr 18
1
[PATCH] Slight cleanups for x86 ring macros (against rc3-mm2)
...movb CS(%esp), %al
- andl $(VM_MASK | (4 << 8) | 3), %eax
- cmpl $((4 << 8) | 3), %eax
+ andl $(VM_MASK | (SEGMENT_TI_MASK << 8) | SEGMENT_RPL_MASK), %eax
+ cmpl $((SEGMENT_LDT << 8) | USER_RPL), %eax
CFI_REMEMBER_STATE
je ldt_ss # returning to user-space with LDT SS
restore_nocheck:
diff -r d8064f9b5964 include/asm-i386/ptrace.h
--- a/include/asm-i386/ptrace.h Mon Aug 07 13:30:17 2006 +1000
+++ b/include/asm-i386/ptrace.h Mon Aug 07 14:32:11 2006 +1000
@@ -74,11 +74,11 @@ extern void send_sigtrap(struct task_str
*/
static inline int user_mode(struct pt_regs *regs)
{
- ret...
2007 Apr 19
0
[RFC, PATCH 3/5] Paravirt_ops pure functions.patch
...:44:49 2007 -0700
+++ b/arch/i386/kernel/entry.S Thu Apr 19 15:58:47 2007 -0700
@@ -436,7 +436,7 @@ ldt_ss:
* is still available to implement the setting of the high
* 16-bits in the INTERRUPT_RETURN paravirt-op.
*/
- cmpl $0, paravirt_ops+PARAVIRT_enabled
+ cmpl $0, paravirt_enabled
jne restore_nocheck
#endif
diff -r a6889086a657 arch/i386/kernel/vmi.c
--- a/arch/i386/kernel/vmi.c Thu Apr 19 15:44:49 2007 -0700
+++ b/arch/i386/kernel/vmi.c Thu Apr 19 16:00:31 2007 -0700
@@ -714,7 +714,6 @@ do { \
*/
static inline int __init activate_vmi(void)
{
- short kernel_cs;
u64 reloc;
cons...
2007 Apr 18
2
[PATCH 1/3] Paravirtualization: Kernel Ring Cleanups
Hi all,
I've been looking at finding common ground between the VMI, Xen and
other paravirtualization approaches, and after some discussion, we're
getting somewhere.
These first two patches are the fundamentals, stolen mainly from the
VMI patches: removing assumptions about the kernel running in ring 0,
and macro-izing all the obvious para-virtualize-needing insns. The
third patch is
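The "macro-izing" of para-virtualize-needing instructions mentioned in this excerpt is what the cli -> CLI hunks quoted further up are doing. As a rough, hedged sketch only (the CONFIG option and paravirt_irq_* hook names below are placeholders, not the identifiers the VMI patches actually use), the idea is:

/*
 * Hedged sketch, not the patches' real header.  The point of renaming cli to
 * CLI is that the macro expands to the native instruction on bare metal but
 * can be redefined to call into a hypervisor, which owns the real interrupt
 * flag once the kernel no longer runs in ring 0.
 */
#ifdef CONFIG_PARAVIRT
#define CLI	call paravirt_irq_disable	/* register-clobber handling omitted in this sketch */
#define STI	call paravirt_irq_enable
#else
#define CLI	cli				/* native instruction, behaviour unchanged */
#define STI	sti
#endif

With that in place, entry.S code such as preempt_stop and resume_userspace can stay the same while the hypervisor backend decides what interrupt masking actually means.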
2007 Apr 18
8
[PATCH 0/7] x86 paravirtualization infrastructure
The following patches introduce the core infrastructure needed to
paravirtualize the 32-bit x86 Linux kernel. This is done by moving
virtualization-sensitive insns or code paths to a function table,
paravirt_ops. This structure can be populated with hypervisor-specific
calls or native stubs and currently supports running on bare metal, VMI,
Xen, or Lhype. These patches apply to
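The "function table" this excerpt describes is, at its core, a struct of function pointers that is filled with either native stubs or hypervisor calls. A minimal C sketch of that shape follows; the member names are simplified stand-ins, not the exact fields of the kernel's paravirt_ops:

/*
 * Illustrative sketch of the function-table approach described above.
 * Member names are simplified placeholders, not the real paravirt_ops fields.
 */
struct pv_ops_sketch {
	int paravirt_enabled;			/* non-zero when a hypervisor backend is active */
	void (*irq_disable)(void);		/* stands in for a raw cli */
	void (*irq_enable)(void);		/* stands in for a raw sti */
	void (*write_cr3)(unsigned long val);	/* page-table base writes go through the table */
};

/* Bare metal fills the table with native stubs that just issue the instruction... */
static void native_irq_disable(void) { asm volatile("cli"); }
static void native_irq_enable(void)  { asm volatile("sti"); }

/* ...while a hypervisor backend (VMI, Xen, Lhype) installs its own calls instead. */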
2007 Apr 18
8
[patch 0/8] Basic infrastructure patches for a paravirtualized kernel
Hi Andrew,
This series of patches lays the basic ground work for the
paravirtualized kernel patches coming later on. I think this lot is
ready for the rough-and-tumble world of the -mm tree.
The main change from the last posting is that all the page-table
related patches have been moved out, and will be posted separately.
Also, the off-by-one in reserving the top of address space has been
2007 Apr 18
8
[patch 0/8] Basic infrastructure patches for a paravirtualized kernel
Hi Andrew,
This series of patches lays the basic ground work for the
paravirtualized kernel patches coming later on. I think this lot is
ready for the rough-and-tumble world of the -mm tree.
The main change from the last posting is that all the page-table
related patches have been moved out, and will be posted separately.
Also, the off-by-one in reserving the top of address space has been