Displaying 20 results from an estimated 38 matches for "vm_mask".
2007 Apr 18
2
[patch 3/8] Allow a kernel to not be in ring 0.
...nt.h | 12 ++++++++++++
6 files changed, 26 insertions(+), 15 deletions(-)
--- 2.6.18-rc3-32.orig/arch/i386/kernel/entry.S
+++ 2.6.18-rc3-32/arch/i386/kernel/entry.S
@@ -229,8 +229,9 @@ ret_from_intr:
check_userspace:
movl EFLAGS(%esp), %eax # mix EFLAGS and CS
movb CS(%esp), %al
- testl $(VM_MASK | 3), %eax
- jz resume_kernel
+ andl $(VM_MASK | SEGMENT_RPL_MASK), %eax
+ cmpl $USER_RPL, %eax
+ jb resume_kernel # not returning to v8086 or userspace
ENTRY(resume_userspace)
cli # make sure we don't miss an interrupt
# setting need_resched or sigpending
@@ -367,8 +368,8 @@ rest...
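The hunk above swaps a bit test for a range compare, so that a kernel running at CPL 1 or 2 is still treated as a kernel return. A minimal standalone C sketch of the same check (the helper and the example selector values are ours for illustration; VM_MASK is 0x00020000 as defined in entry.S further down this page, and SEGMENT_RPL_MASK and USER_RPL are both 0x3 in asm-i386/segment.h):

#include <assert.h>

#define VM_MASK          0x00020000   /* EFLAGS.VM: returning to v8086 mode */
#define SEGMENT_RPL_MASK 0x3          /* RPL field of a segment selector    */
#define USER_RPL         0x3          /* user space always runs at ring 3   */

/* entry.S loads EFLAGS into %eax and overlays the low byte with the saved
 * CS, so one masked compare answers "v8086 or user space?".  The old
 * testl/jz treated any non-zero RPL as user space, which breaks once the
 * kernel itself runs below ring 0; the new form only goes to resume_kernel
 * when the masked value is below USER_RPL.                                */
static int returning_to_user(unsigned long eflags, unsigned long cs)
{
        unsigned long mixed = (eflags & ~0xffUL) | (cs & 0xff);

        return (mixed & (VM_MASK | SEGMENT_RPL_MASK)) >= USER_RPL;
}

int main(void)
{
        assert(!returning_to_user(0x00000046, 0x60));   /* ring-0 kernel CS */
        assert(!returning_to_user(0x00000046, 0x61));   /* ring-1 kernel CS */
        assert( returning_to_user(0x00000246, 0x73));   /* ring-3 user CS   */
        assert( returning_to_user(0x00020246, 0x73));   /* v8086 (VM set)   */
        return 0;
}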
2007 Apr 18
1
[PATCH] Slight cleanups for x86 ring macros (against rc3-mm2)
...orp.com.au>
diff -r d8064f9b5964 arch/i386/kernel/entry.S
--- a/arch/i386/kernel/entry.S Mon Aug 07 13:30:17 2006 +1000
+++ b/arch/i386/kernel/entry.S Mon Aug 07 14:32:11 2006 +1000
@@ -237,7 +237,7 @@ check_userspace:
movl EFLAGS(%esp), %eax # mix EFLAGS and CS
movb CS(%esp), %al
andl $(VM_MASK | SEGMENT_RPL_MASK), %eax
- cmpl $SEGMENT_RPL_MASK, %eax
+ cmpl $USER_RPL, %eax
jb resume_kernel # not returning to v8086 or userspace
ENTRY(resume_userspace)
DISABLE_INTERRUPTS # make sure we don't miss an interrupt
@@ -374,8 +374,8 @@ restore_all:
# See comments in process.c:copy_th...
2007 Apr 18
1
[PATCH 2/2] Transparent privilege levels in entry.S
i386 Transparent Paravirtualization Patch #2
Changes required to low level fault / system call code for supporting
transparent paravirtualization where the kernel may be running at non-zero
CPL.
The testing of VM_MASK and CS together uses a neat trick which is now extended
to support CPL 0,1,2 kernels.
Note that assembly code must test against __ESPFIX_SS, the 16-bit stack on
interrupts and exceptions. To do so at non-zero CPL requires masking off
the RPL of the selector (which is a nop for the mach-default im...
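A rough standalone C sketch of the two points made above, our own illustration rather than code from the patch (VM_MASK and SEGMENT_RPL_MASK take their usual i386 values; kernel_rpl stands for whatever CPL the kernel has been placed at):

#define VM_MASK          0x00020000
#define SEGMENT_RPL_MASK 0x3

/* "Testing VM_MASK and CS together": OR the VM flag with the saved CS RPL
 * and a single compare against the kernel's own RPL separates a kernel
 * frame (== kernel_rpl) from user space or v8086 (> kernel_rpl), whether
 * the kernel sits at CPL 0, 1 or 2.                                      */
static int from_user_or_v8086(unsigned long eflags, unsigned long cs,
                              unsigned long kernel_rpl)
{
        return ((eflags & VM_MASK) | (cs & SEGMENT_RPL_MASK)) > kernel_rpl;
}

/* The __ESPFIX_SS comparison: at non-zero CPL the saved SS carries the
 * kernel's RPL, so the RPL must be stripped before comparing selectors
 * (a no-op for the mach-default, ring-0 case).                           */
static int is_espfix_ss(unsigned long oldss, unsigned long espfix_ss)
{
        return (oldss & ~(unsigned long)SEGMENT_RPL_MASK)
               == (espfix_ss & ~(unsigned long)SEGMENT_RPL_MASK);
}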
2007 Apr 18
0
[RFC, PATCH 11/24] i386 Vmi segment changes
...id send_sigtrap(struct task_str
*/
static inline int user_mode(struct pt_regs *regs)
{
- return (regs->xcs & 3) != 0;
+ return (regs->xcs & SEGMENT_RPL_MASK) == 3;
}
static inline int user_mode_vm(struct pt_regs *regs)
{
- return ((regs->xcs & 3) | (regs->eflags & VM_MASK)) != 0;
+ return (((regs->xcs & SEGMENT_RPL_MASK) | (regs->eflags & VM_MASK)) >= 3);
}
#define instruction_pointer(regs) ((regs)->eip)
#if defined(CONFIG_SMP) && defined(CONFIG_FRAME_POINTER)
Index: linux-2.6.16-rc5/include/asm-i386/segment.h
=========================...
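The rewritten predicates use the same numeric trick as the entry.S hunks: VM_MASK sits far above 3, so OR-ing it in pushes the result past the user RPL even when the saved CS carries the kernel's own non-zero RPL. A small self-contained check of the new user_mode_vm() logic (the test harness and selector values are ours; 0x73 is the usual i386 __USER_CS):

#include <assert.h>

#define SEGMENT_RPL_MASK 0x3
#define VM_MASK          0x00020000

static int user_mode_vm_like(unsigned long xcs, unsigned long eflags)
{
        return ((xcs & SEGMENT_RPL_MASK) | (eflags & VM_MASK)) >= 3;
}

int main(void)
{
        assert(!user_mode_vm_like(0x60, 0));          /* ring-0 kernel CS       */
        assert(!user_mode_vm_like(0x61, 0));          /* ring-1 kernel CS (VMI) */
        assert( user_mode_vm_like(0x73, 0));          /* ring-3 user CS         */
        assert( user_mode_vm_like(0x61, VM_MASK));    /* v8086, any RPL         */
        return 0;
}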
2007 Apr 18
0
[PATCH 17/21] i386 Ldt cleanups 1
...k.orig/arch/i386/kernel/entry.S 2005-10-27 17:02:08.000000000 -0700
+++ linux-2.6.14-zach-work/arch/i386/kernel/entry.S 2005-11-04 18:22:07.000000000 -0800
@@ -250,8 +250,8 @@ restore_all:
# See comments in process.c:copy_thread() for details.
movb OLDSS(%esp), %ah
movb CS(%esp), %al
- andl $(VM_MASK | (4 << 8) | 3), %eax
- cmpl $((4 << 8) | 3), %eax
+ andl $(VM_MASK | (LDT_SEGMENT << 8) | 3), %eax
+ cmpl $((LDT_SEGMENT << 8) | 3), %eax
je ldt_ss # returning to user-space with LDT SS
restore_nocheck:
RESTORE_REGS
Index: linux-2.6.14-zach-work/arch/i386/kernel/ptra...
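The magic 4 being renamed here is the Table Indicator bit of a selector, and the shift by 8 mirrors how the code packs the saved SS's low byte into %ah above the saved CS in %al. A standalone C sketch of the whole test (our illustration; LDT_SEGMENT is assumed to be defined as 4 by the patch, and EFLAGS is mixed in first as in the full restore_all code):

#define VM_MASK     0x00020000
#define LDT_SEGMENT 4           /* TI bit: selector refers to the LDT */

/* %eax = EFLAGS, %ah = low byte of the saved SS, %al = low byte of the
 * saved CS, so the SS TI bit lands at bit 10 and the CS RPL in bits 0-1.
 * A single compare then detects "returning to ring 3 on an LDT-based
 * stack segment", the case handled by the ldt_ss path.                  */
static int returning_to_ldt_ss(unsigned long eflags, unsigned long oldss,
                               unsigned long cs)
{
        unsigned long mixed = (eflags & ~0xffffUL)
                            | ((oldss & 0xff) << 8)
                            | (cs & 0xff);

        return (mixed & (VM_MASK | (LDT_SEGMENT << 8) | 3))
               == ((LDT_SEGMENT << 8) | 3);
}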
2007 Apr 18
1
[PATCH 6/14] i386 / Add some segment convenience functions
...=======
--- linux-2.6.13.orig/arch/i386/kernel/entry.S 2005-08-09 19:36:36.000000000 -0700
+++ linux-2.6.13/arch/i386/kernel/entry.S 2005-08-09 19:43:47.000000000 -0700
@@ -268,8 +268,8 @@
# See comments in process.c:copy_thread() for details.
movb OLDSS(%esp), %ah
movb CS(%esp), %al
- andl $(VM_MASK | (4 << 8) | 3), %eax
- cmpl $((4 << 8) | 3), %eax
+ andl $(VM_MASK | (LDT_SEGMENT << 8) | 3), %eax
+ cmpl $((LDT_SEGMENT << 8) | 3), %eax
je ldt_ss # returning to user-space with LDT SS
restore_nocheck:
RESTORE_REGS
Index: linux-2.6.13/arch/i386/mm/fault.c
==========...
2007 Apr 18
2
[PATCH 19/21] i386 Kprobes semaphore fix
...g lock here, so this is safe.
Disarm the probe we just hit, and ignore it. */
p = get_kprobe(addr);
@@ -209,13 +211,20 @@ static int __kprobes kprobe_handler(stru
lock_kprobes();
p = get_kprobe(addr);
if (!p) {
+ unsigned char instr;
unlock_kprobes();
if (regs->eflags & VM_MASK) {
/* We are in virtual-8086 mode. Return 0 */
goto no_kprobe;
}
- if (*addr != BREAKPOINT_INSTRUCTION) {
+ instr = BREAKPOINT_INSTRUCTION;
+ if (user_mode(regs))
+ __get_user(instr, (unsigned char __user *) addr);
+ else
+ instr = *addr;
+
+ if (instr != BREAKPOINT_INSTRUC...
2007 Apr 18
1
[PATCH 1/2] Transparent entry.S IRQ holdoff handling
...m/page.h>
#include <asm/desc.h>
#include "irq_vectors.h"
+#include "mach_asm.h"
/* We do not recover from a stack overflow, but at least
* we know it happened and should be able to track it down.
*/
@@ -88,7 +89,7 @@ NT_MASK = 0x00004000
VM_MASK = 0x00020000
#ifdef CONFIG_PREEMPT
-#define preempt_stop cli
+#define preempt_stop CLI
#else
#define preempt_stop
#define resume_kernel restore_nocheck
@@ -161,7 +162,7 @@ ret_from_intr:
testl $(VM_MASK | 3), %eax
jz resume_kernel
ENTRY(resume_userspace)
- cli # make sure we don'...
2007 Apr 18
8
[patch 0/8] Basic infrastructure patches for a paravirtualized kernel
Hi Andrew,
This series of patches lays the basic ground work for the
paravirtualized kernel patches coming later on. I think this lot is
ready for the rough-and-tumble world of the -mm tree.
The main change from the last posting is that all the page-table
related patches have been moved out, and will be posted separately.
Also, the off-by-one in reserving the top of address space has been
2007 Apr 18
1
[RFC, PATCH 8/24] i386 Vmi syscall assembly
...try.S 2006-03-08 11:40:12.000000000 -0800
@@ -48,6 +48,7 @@
#include <asm/smp.h>
#include <asm/page.h>
#include <asm/desc.h>
+#include <mach_asm.h>
#include "irq_vectors.h"
#define nr_syscalls ((syscall_table_size)/4)
@@ -76,7 +77,7 @@ NT_MASK = 0x00004000
VM_MASK = 0x00020000
#ifdef CONFIG_PREEMPT
-#define preempt_stop cli
+#define preempt_stop CLI
#else
#define preempt_stop
#define resume_kernel restore_nocheck
@@ -148,7 +149,7 @@ ret_from_intr:
testl $(VM_MASK | 3), %eax
jz resume_kernel
ENTRY(resume_userspace)
- cli # make sure we don'...