Displaying 10 results from an estimated 37 matches for "segment_rpl_mask".
2007 Apr 18
2
[patch 3/8] Allow a kernel to not be in ring 0.
...efine USER_MODE_MASK which depends on a
config option, we use Zach's more flexible approach of assuming ring 3
== userspace. I also used "get_kernel_rpl()" over "get_kernel_cs()"
because I think it reads better in the code...
1) Remove the hardcoded 3 and introduce #define SEGMENT_RPL_MASK 3
2) Add a get_kernel_rpl() macro, and don't assume it's zero.
3) Use USER_RPL macro instead of hardcoded 3
Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
Signed-off-by: Zachary Amsden <zach@vmware.com>
Signed-off-by: Jeremy Fitzhardinge <jeremy@xensource.com>
Signe...
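A minimal sketch of the macros the patch above describes, assuming the usual i386 segment.h shape (exact placement and comments may differ):

  /* Bottom two bits of a segment selector hold its Requested
     Privilege Level (RPL). */
  #define SEGMENT_RPL_MASK 0x3

  /* Userspace always runs at ring 3. */
  #define USER_RPL 0x3

  /* The kernel's own ring is no longer assumed to be 0: a native
     kernel returns 0, a paravirtualized one may return 1 or 2. */
  #define get_kernel_rpl() 0   /* native case only */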
2007 Apr 18
1
[PATCH] Slight cleanups for x86 ring macros (against rc3-mm2)
...>
diff -r d8064f9b5964 arch/i386/kernel/entry.S
--- a/arch/i386/kernel/entry.S Mon Aug 07 13:30:17 2006 +1000
+++ b/arch/i386/kernel/entry.S Mon Aug 07 14:32:11 2006 +1000
@@ -237,7 +237,7 @@ check_userspace:
movl EFLAGS(%esp), %eax # mix EFLAGS and CS
movb CS(%esp), %al
andl $(VM_MASK | SEGMENT_RPL_MASK), %eax
- cmpl $SEGMENT_RPL_MASK, %eax
+ cmpl $USER_RPL, %eax
jb resume_kernel # not returning to v8086 or userspace
ENTRY(resume_userspace)
DISABLE_INTERRUPTS # make sure we don't miss an interrupt
@@ -374,8 +374,8 @@ restore_all:
# See comments in process.c:copy_thread() for details....
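A rough C equivalent of the check_userspace test in the hunk above (function name and calling convention are illustrative):

  #define VM_MASK          0x00020000UL  /* EFLAGS.VM: virtual-8086 mode */
  #define SEGMENT_RPL_MASK 0x3UL
  #define USER_RPL         0x3UL

  /* Fold EFLAGS.VM and the CS ring bits into one value: kernel rings
     compare below USER_RPL, while userspace (RPL 3) and v8086 mode
     (VM bit set) compare at or above it. */
  static int returning_to_kernel(unsigned long eflags, unsigned long cs)
  {
      unsigned long mode = (eflags & VM_MASK) | (cs & SEGMENT_RPL_MASK);
      return mode < USER_RPL;   /* the "jb resume_kernel" branch */
  }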
2007 Apr 18
0
[RFC, PATCH 11/24] i386 Vmi segment changes
...struct task_struct;
extern void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs, int error_code);
@@ -73,11 +74,11 @@ extern void send_sigtrap(struct task_str
*/
static inline int user_mode(struct pt_regs *regs)
{
- return (regs->xcs & 3) != 0;
+ return (regs->xcs & SEGMENT_RPL_MASK) == 3;
}
static inline int user_mode_vm(struct pt_regs *regs)
{
- return ((regs->xcs & 3) | (regs->eflags & VM_MASK)) != 0;
+ return (((regs->xcs & SEGMENT_RPL_MASK) | (regs->eflags & VM_MASK)) >= 3);
}
#define instruction_pointer(regs) ((regs)->eip)
#if defi...
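Why the hunk above tests "== 3" rather than "!= 0": a paravirtualized kernel can run at ring 1, where the old test misfires. A self-contained demonstration, with hypothetical selector values:

  #include <assert.h>

  #define SEGMENT_RPL_MASK 0x3

  int main(void)
  {
      unsigned long ring1_kernel_cs = 0x61;  /* hypothetical: RPL = 1 */
      unsigned long user_cs         = 0x73;  /* hypothetical: RPL = 3 */

      /* Old test, (xcs & 3) != 0: wrongly reports the ring-1 kernel
         selector as user mode. */
      assert((ring1_kernel_cs & SEGMENT_RPL_MASK) != 0);

      /* New test, (xcs & SEGMENT_RPL_MASK) == 3: only true ring-3
         code counts as userspace. */
      assert((ring1_kernel_cs & SEGMENT_RPL_MASK) != 3);
      assert((user_cs & SEGMENT_RPL_MASK) == 3);
      return 0;
  }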
2007 Apr 18
2
[PATCH 1/3] Paravirtualization: Kernel Ring Cleanups
...ernel Ring Cleanups
Status: Booted on 2.6.16-rc2-git7
Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
This is Zach's patch to clean up assumptions about the kernel running
in ring 0 (which it doesn't when running paravirtualized).
1) Remove the hardcoded 3 and introduce #define SEGMENT_RPL_MASK 3
2) Add a get_kernel_rpl() function
3) Create COMPARE_SEGMENT_STACK and COMPARE_SEGMENT_REG macros which
can mask out the bottom two bits (RPL) when comparing for
paravirtualization.
diff -urpN --exclude TAGS -X /home/rusty/devel/kernel/kernel-patches/current-dontdiff --minimal linux-2.6.17...
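The COMPARE_SEGMENT_* macros named above are assembly, but the masking idea they describe can be sketched in C (helper name is hypothetical):

  #define SEGMENT_RPL_MASK 0x3

  /* Two selectors name the same descriptor if they match once the
     RPL bits are masked off; this lets one check pass whether the
     kernel runs at ring 0 or, paravirtualized, at ring 1. */
  static int same_segment_ignoring_rpl(unsigned short a, unsigned short b)
  {
      return (a & ~SEGMENT_RPL_MASK) == (b & ~SEGMENT_RPL_MASK);
  }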
2007 Apr 18
8
[patch 0/8] Basic infrastructure patches for a paravirtualized kernel
Hi Andrew,
This series of patches lays the basic ground work for the
paravirtualized kernel patches coming later on. I think this lot is
ready for the rough-and-tumble world of the -mm tree.
The main change from the last posting is that all the page-table
related patches have been moved out, and will be posted separately.
Also, the off-by-one in reserving the top of address space has been
2007 Apr 19
0
[RFC, PATCH 3/5] Paravirt_ops pure functions.patch
...(struct vmi_relocation_info *)&reloc;
@@ -722,13 +721,11 @@ static inline int __init activate_vmi(vo
printk(KERN_ERR "VMI ROM failed to initialize!");
return 0;
}
- savesegment(cs, kernel_cs);
-
- paravirt_ops.paravirt_enabled = 1;
- paravirt_ops.kernel_rpl = kernel_cs & SEGMENT_RPL_MASK;
+
+ paravirt_enabled = 1;
+ paravirt_backend = "vmi";
paravirt_ops.patch = vmi_patch;
- paravirt_ops.name = "vmi";
/*
* Many of these operations are ABI compatible with VMI.
diff -r a6889086a657 include/asm-i386/paravirt.h
--- a/include/asm-i386/paravirt.h Thu Apr 19...
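The savesegment()/kernel_rpl lines being removed above boil down to reading the live %cs at boot and keeping its RPL bits; a standalone sketch, using raw inline asm in place of the kernel's savesegment() helper:

  #define SEGMENT_RPL_MASK 0x3

  static unsigned int probe_kernel_rpl(void)
  {
      unsigned int kernel_cs;
      /* Read the code segment register the kernel is actually
         running with; under VMI this is not ring 0. */
      asm volatile("mov %%cs, %0" : "=r"(kernel_cs));
      return kernel_cs & SEGMENT_RPL_MASK;
  }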
2007 Apr 18
15
[PATCH 0 of 13] Basic infrastructure patches for a paravirtualized kernel
[ REPOST: Apologies to anyone who has seen this before. It
didn't make it onto any of the lists it should have. -J ]
Hi Andrew,
This series of patches lays the basic ground work for the
paravirtualized kernel patches coming later on. I think this lot is
ready for the rough-and-tumble world of the -mm tree.
For the most part, these patches do nothing or very little. The
patches should
2007 Jun 06
0
[PATCH UPDATE] xen: use iret directly where possible
...an interrupt instead. Unlikely?
+ Definitely. Easy to avoid? Yes. The Intel documents
+ explicitly say that the reported EIP for a bad jump is the
+ jump instruction itself, not the destination, but some virtual
+ environments get this wrong.
+ */
+ movl PT_CS+4(%esp), %ecx
+ andl $SEGMENT_RPL_MASK, %ecx
+ cmpl $USER_RPL, %ecx
+ je 2f
+
+ lea PT_ORIG_EAX+4(%esp), %esi
+ lea PT_EFLAGS+4(%esp), %edi
+
+ /* If eip is before iret_restore_end then stack
+ hasn't been restored yet. */
+ cmp $iret_restore_end, %eax
+ jae 1f
+
+ movl 0+4(%edi),%eax /* copy EAX */
+ movl %eax, PT_EAX+4(%esp)
+...
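The CS test at the top of the hunk above, rendered as C (pt_regs access simplified to a plain argument):

  #define SEGMENT_RPL_MASK 0x3
  #define USER_RPL         0x3

  /* If the interrupted frame's CS carries the user RPL, the fault
     happened in userspace and the kernel-stack fixup below can be
     skipped entirely (the "je 2f" branch). */
  static int interrupted_in_userspace(unsigned long cs)
  {
      return (cs & SEGMENT_RPL_MASK) == USER_RPL;
  }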
2007 Jun 04
1
[PATCH] xen: use iret directly where possible
...imagine a case where userspace jumps into
+ the critical range address, but just before the CPU
+ delivers a GP, it decides to deliver an interrupt
+ instead. Unlikely? Definitely. Easy to avoid?
+ Yes. (Some virtual environments get this wrong.) */
+ movl PT_CS+4(%esp), %ecx
+ andl $SEGMENT_RPL_MASK, %ecx
+ cmpl $USER_RPL, %ecx
+ je 2f
+
+ lea PT_ORIG_EAX+4(%esp), %esi
+ lea PT_EFLAGS+4(%esp), %edi
+
+ /* If eip is before iret_restore_end then stack
+ hasn't been restored yet. */
+ cmp $iret_restore_end, %eax
+ jae 1f
+
+ movl 0+4(%edi),%eax /* copy EAX */
+ movl %eax, PT_EAX+4(%esp)
+...
2007 Apr 18
3
[PATCH 1/4] x86 paravirt_ops: create no_paravirt.h for native ops
...declaration, a strange C thing */
struct task_struct;
struct mm_struct;
===================================================================
--- a/include/asm-i386/segment.h
+++ b/include/asm-i386/segment.h
@@ -121,5 +121,4 @@
/* Bottom two bits of xcs give the ring privilege level */
#define SEGMENT_RPL_MASK 0x3
-#define get_kernel_rpl() 0
#endif
===================================================================
--- a/include/asm-i386/spinlock.h
+++ b/include/asm-i386/spinlock.h
@@ -5,6 +5,7 @@
#include <asm/rwlock.h>
#include <asm/page.h>
#include <linux/compiler.h>
+#include...
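With get_kernel_rpl() dropped from segment.h, the native and paravirt definitions split; a sketch of the resulting arrangement (header names as in this patch series, the paravirt side assumed):

  #ifdef CONFIG_PARAVIRT
  /* include/asm-i386/paravirt.h: ask the backend which ring we got. */
  #define get_kernel_rpl()  (paravirt_ops.kernel_rpl)
  #else
  /* include/asm-i386/no_paravirt.h: native kernels stay at ring 0. */
  #define get_kernel_rpl()  0
  #endif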