Displaying 20 results from an estimated 107 matches for "get_kernel_rpl".
2007 Apr 18
0
[RFC, PATCH 11/24] i386 Vmi segment changes
...Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * Send feedback to zach@vmware.com
+ *
+ */
+
+
+#ifndef __MACH_SEGMENT_H
+#define __MACH_SEGMENT_H
+
+#if !defined(CONFIG_X86_VMI)
+# error invalid sub-arch include
+#endif
+
+#ifndef __ASSEMBLY__
+static inline unsigned get_kernel_rpl(void)
+{
+ unsigned cs;
+ __asm__ ("movl %%cs,%0" : "=r"(cs):);
+ return cs & SEGMENT_RPL_MASK;
+}
+#endif
+
+#define COMPARE_SEGMENT_STACK(segment, offset) \
+ pushl %eax; \
+ mov offset+4(%esp), %eax; \
+ andl $~SEGMENT_RPL_MASK, %eax; \
+ cmpw $segment,%ax; \
+...
2007 Apr 18
0
[RFC, PATCH 11/24] i386 Vmi segment changes
...Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * Send feedback to zach@vmware.com
+ *
+ */
+
+
+#ifndef __MACH_SEGMENT_H
+#define __MACH_SEGMENT_H
+
+#if !defined(CONFIG_X86_VMI)
+# error invalid sub-arch include
+#endif
+
+#ifndef __ASSEMBLY__
+static inline unsigned get_kernel_rpl(void)
+{
+ unsigned cs;
+ __asm__ ("movl %%cs,%0" : "=r"(cs):);
+ return cs & SEGMENT_RPL_MASK;
+}
+#endif
+
+#define COMPARE_SEGMENT_STACK(segment, offset) \
+ pushl %eax; \
+ mov offset+4(%esp), %eax; \
+ andl $~SEGMENT_RPL_MASK, %eax; \
+ cmpw $segment,%ax; \
+...
2007 Apr 18
2
[patch 3/8] Allow a kernel to not be in ring 0.
...ting %cs or
checking privilege level (user vs kernel).
This is Chris' [RFC PATCH 15/33] move segment checks to subarch,
except rather than using #define USER_MODE_MASK which depends on a
config option, we use Zach's more flexible approach of assuming ring 3
== userspace. I also used "get_kernel_rpl()" over "get_kernel_cs()"
because I think it reads better in the code...
1) Remove the hardcoded 3 and introduce #define SEGMENT_RPL_MASK 3
2) Add a get_kernel_rpl() macro, and don't assume it's zero.
3) Use USER_RPL macro instead of hardcoded 3
Signed-off-by: Rusty Russell...
2007 Apr 18
2
[patch 3/8] Allow a kernel to not be in ring 0.
...ting %cs or
checking privilege level (user vs kernel).
This is Chris' [RFC PATCH 15/33] move segment checks to subarch,
except rather than using #define USER_MODE_MASK which depends on a
config option, we use Zach's more flexible approach of assuming ring 3
== userspace. I also used "get_kernel_rpl()" over "get_kernel_cs()"
because I think it reads better in the code...
1) Remove the hardcoded 3 and introduce #define SEGMENT_RPL_MASK 3
2) Add a get_kernel_rpl() macro, and don't assume it's zero.
3) Use USER_RPL macro instead of hardcoded 3
Signed-off-by: Rusty Russell...
2007 Dec 07
2
[PATCH 0/3] Unify segment headers
Hi,
In this patch, I unify segment_32.h and segment_64.h into segment.h
They have some common parts, but a considerable amount of code still has
to be around ifdefs.
The only patch that is really important to paravirt is the first one, that
moves a paravirt definition into the common header. The other two are just
normal integration, and pretty much independent
2007 Dec 07
2
[PATCH 0/3] Unify segment headers
Hi,
In this patch, I unify segment_32.h and segment_64.h into segment.h
They have some common parts, but a considerable amount of code still has
to be around ifdefs.
The only patch that is really important to paravirt is the first one, that
moves a paravirt definition into the common header. The other two are just
normal integration, and pretty much independent
2007 Apr 18
2
[PATCH 1/3] Paravirtualization: Kernel Ring Cleanups
...Booted on 2.6.16-rc2-git7
Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
This is Zach's patch to clean up assumptions about the kernel running
in ring 0 (which it doesn't when running paravirtualized).
1) Remove the hardcoded 3 and introduce #define SEGMENT_RPL_MASK 3
2) Add a get_kernel_rpl() function
3) Create COMPARE_SEGMENT_STACK and COMPARE_SEGMENT_REG macros which
can mask out the bottom two bits (RPL) when comparing for
paravirtualization.
diff -urpN --exclude TAGS -X /home/rusty/devel/kernel/kernel-patches/current-dontdiff --minimal linux-2.6.17-rc2-git7/arch/i386/kernel...
2007 Apr 18
2
[PATCH 1/3] Paravirtualization: Kernel Ring Cleanups
...Booted on 2.6.16-rc2-git7
Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
This is Zach's patch to clean up assumptions about the kernel running
in ring 0 (which it doesn't when running paravirtualized).
1) Remove the hardcoded 3 and introduce #define SEGMENT_RPL_MASK 3
2) Add a get_kernel_rpl() function
3) Create COMPARE_SEGMENT_STACK and COMPARE_SEGMENT_REG macros which
can mask out the bottom two bits (RPL) when comparing for
paravirtualization.
diff -urpN --exclude TAGS -X /home/rusty/devel/kernel/kernel-patches/current-dontdiff --minimal linux-2.6.17-rc2-git7/arch/i386/kernel...
2020 Aug 07
0
[PATCH v3 4/7] x86/paravirt: remove 32-bit support from PARAVIRT_XXL
...LE_LEVELS 2
diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
index 3d2afecde50c..dedc89a07826 100644
--- a/arch/x86/include/asm/paravirt.h
+++ b/arch/x86/include/asm/paravirt.h
@@ -160,8 +160,6 @@ static inline void wbinvd(void)
PVOP_VCALL0(cpu.wbinvd);
}
-#define get_kernel_rpl() (pv_info.kernel_rpl)
-
static inline u64 paravirt_read_msr(unsigned msr)
{
return PVOP_CALL1(u64, cpu.read_msr, msr);
@@ -277,12 +275,10 @@ static inline void load_TLS(struct thread_struct *t, unsigned cpu)
PVOP_VCALL2(cpu.load_tls, t, cpu);
}
-#ifdef CONFIG_X86_64
static inline void l...
2020 Aug 15
0
[PATCH v4 1/6] x86/paravirt: remove 32-bit support from PARAVIRT_XXL
...LE_LEVELS 2
diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
index 3d2afecde50c..25c7a73461f6 100644
--- a/arch/x86/include/asm/paravirt.h
+++ b/arch/x86/include/asm/paravirt.h
@@ -160,8 +160,6 @@ static inline void wbinvd(void)
PVOP_VCALL0(cpu.wbinvd);
}
-#define get_kernel_rpl() (pv_info.kernel_rpl)
-
static inline u64 paravirt_read_msr(unsigned msr)
{
return PVOP_CALL1(u64, cpu.read_msr, msr);
@@ -277,12 +275,10 @@ static inline void load_TLS(struct thread_struct *t, unsigned cpu)
PVOP_VCALL2(cpu.load_tls, t, cpu);
}
-#ifdef CONFIG_X86_64
static inline void l...
2013 Oct 31
1
[PATCH 3/3] x86: Support compiling out userspace I/O (iopl and ioperm)
...ded. In normal use, the flags restore
> + * in the switch assembly will handle this. But if the kernel
> + * is running virtualized at a non-zero CPL, the popf will
> + * not restore flags, so it must be done in a separate step.
> + */
> + if (get_kernel_rpl() && unlikely(prev->iopl != next->iopl))
> + set_iopl_mask(next->iopl);
> +#endif /* CONFIG_X86_IOPORT */
> +}
> +
> +static inline void switch_io_bitmap(struct tss_struct *tss,
> + struct task_struct *prev_p,
> + struct task_struct...
2013 Oct 31
1
[PATCH 3/3] x86: Support compiling out userspace I/O (iopl and ioperm)
...ded. In normal use, the flags restore
> + * in the switch assembly will handle this. But if the kernel
> + * is running virtualized at a non-zero CPL, the popf will
> + * not restore flags, so it must be done in a separate step.
> + */
> + if (get_kernel_rpl() && unlikely(prev->iopl != next->iopl))
> + set_iopl_mask(next->iopl);
> +#endif /* CONFIG_X86_IOPORT */
> +}
> +
> +static inline void switch_io_bitmap(struct tss_struct *tss,
> + struct task_struct *prev_p,
> + struct task_struct...
2007 Apr 19
0
[RFC, PATCH 3/5] Paravirt_ops pure functions.patch
...(void)
-{
- return paravirt_ops.paravirt_enabled;
-}
+extern int paravirt_enabled;
+extern char *paravirt_backend;
static inline void load_esp0(struct tss_struct *tss,
struct thread_struct *thread)
@@ -531,7 +524,12 @@ static inline void wbinvd(void)
PVOP_VCALL0(wbinvd);
}
-#define get_kernel_rpl() (paravirt_ops.kernel_rpl)
+#define get_kernel_rpl() \
+({ \
+ short kernel_cs; \
+ savesegment(cs, kernel_cs); \
+ (kernel_cs & SEGMENT_RPL_MASK); \
+})
static inline u64 paravirt_read_msr(unsigned msr, int *err)
{
@@ -799,6 +797,8 @@ static inline void pte_update_defer(stru...
2007 Apr 19
0
[RFC, PATCH 3/5] Paravirt_ops pure functions.patch
...(void)
-{
- return paravirt_ops.paravirt_enabled;
-}
+extern int paravirt_enabled;
+extern char *paravirt_backend;
static inline void load_esp0(struct tss_struct *tss,
struct thread_struct *thread)
@@ -531,7 +524,12 @@ static inline void wbinvd(void)
PVOP_VCALL0(wbinvd);
}
-#define get_kernel_rpl() (paravirt_ops.kernel_rpl)
+#define get_kernel_rpl() \
+({ \
+ short kernel_cs; \
+ savesegment(cs, kernel_cs); \
+ (kernel_cs & SEGMENT_RPL_MASK); \
+})
static inline u64 paravirt_read_msr(unsigned msr, int *err)
{
@@ -799,6 +797,8 @@ static inline void pte_update_defer(stru...
2020 Aug 07
4
[PATCH v3 0/7] Remove 32-bit Xen PV guest support
The long term plan has been to replace Xen PV guests by PVH. The first
victim of that plan are now 32-bit PV guests, as those are used only
rather seldom these days. Xen on x86 requires 64-bit support and with
Grub2 now supporting PVH officially since version 2.04 there is no
need to keep 32-bit PV guest support alive in the Linux kernel.
Additionally Meltdown mitigation is not available in the
2020 Jul 01
5
[PATCH v2 0/4] Remove 32-bit Xen PV guest support
The long term plan has been to replace Xen PV guests by PVH. The first
victim of that plan are now 32-bit PV guests, as those are used only
rather seldom these days. Xen on x86 requires 64-bit support and with
Grub2 now supporting PVH officially since version 2.04 there is no
need to keep 32-bit PV guest support alive in the Linux kernel.
Additionally Meltdown mitigation is not available in the
2020 Jul 01
5
[PATCH v2 0/4] Remove 32-bit Xen PV guest support
The long term plan has been to replace Xen PV guests by PVH. The first
victim of that plan are now 32-bit PV guests, as those are used only
rather seldom these days. Xen on x86 requires 64-bit support and with
Grub2 now supporting PVH officially since version 2.04 there is no
need to keep 32-bit PV guest support alive in the Linux kernel.
Additionally Meltdown mitigation is not available in the
2020 Aug 15
6
[PATCH v4 0/6] x86/paravirt: cleanup after 32-bit PV removal
A lot of cleanup after removal of 32-bit Xen PV guest support in
paravirt code.
Changes in V4:
- dropped patches 1-3, as already committed
- addressed comments to V3
- added new patches 5+6
Changes in V3:
- addressed comments to V2
- split patch 1 into 2 patches
- new patches 3 and 7
Changes in V2:
- rebase to 5.8 kernel
- addressed comments to V1
- new patches 3 and 4
Juergen Gross (6):
2013 Oct 22
0
[PATCH 3/3] x86: Support compiling out userspace I/O (iopl and ioperm)
...* Restore IOPL if needed. In normal use, the flags restore
+ * in the switch assembly will handle this. But if the kernel
+ * is running virtualized at a non-zero CPL, the popf will
+ * not restore flags, so it must be done in a separate step.
+ */
+ if (get_kernel_rpl() && unlikely(prev->iopl != next->iopl))
+ set_iopl_mask(next->iopl);
+#endif /* CONFIG_X86_IOPORT */
+}
+
+static inline void switch_io_bitmap(struct tss_struct *tss,
+ struct task_struct *prev_p,
+ struct task_struct *next_p)
+{
+#ifdef CONFIG_X86_IOP...
2014 Mar 11
0
[PATCHv2 3/3] x86: Support compiling out userspace I/O (iopl and ioperm)
...* Restore IOPL if needed. In normal use, the flags restore
+ * in the switch assembly will handle this. But if the kernel
+ * is running virtualized at a non-zero CPL, the popf will
+ * not restore flags, so it must be done in a separate step.
+ */
+ if (get_kernel_rpl() && unlikely(prev->iopl != next->iopl))
+ set_iopl_mask(next->iopl);
+#endif /* CONFIG_X86_IOPORT */
+}
+
+static inline void switch_io_bitmap(struct tss_struct *tss,
+ struct task_struct *prev_p,
+ struct task_struct *next_p)
+{
+#ifdef CONFIG_X86_IOP...