Displaying 13 results from an estimated 13 matches for "get_cr0".
2007 Apr 18
1
[RFC, PATCH 8/24] i386 Vmi syscall assembly
...ng
- sti # could let do_syscall_trace() call
+ STI # could let do_syscall_trace() call
# schedule() instead
movl %esp, %eax
movl $1, %edx
@@ -475,7 +475,7 @@ ENTRY(simd_coprocessor_error)
ENTRY(device_not_available)
pushl $-1 # mark this as an int
SAVE_ALL
- movl %cr0, %eax
+ GET_CR0
testl $0x4, %eax # EM (math emulation bit)
jne device_not_available_emulate
preempt_stop
@@ -586,7 +586,7 @@ nmi_16bit_stack:
call do_nmi
RESTORE_REGS
lss 12+4(%esp), %esp # back to 16bit stack
-1: iret
+1: IRET
.section __ex_table,"a"
.align 4
.long 1b,iret_exc
Index:...
2006 Feb 10
0
(no subject)
...atch, can you take a look for me?
Thanks
-Xin
diff -r 47013962e411 tools/firmware/vmxassist/setup.c
--- a/tools/firmware/vmxassist/setup.c Fri Feb 10 02:23:36 2006
+++ b/tools/firmware/vmxassist/setup.c Fri Feb 10 15:28:17 2006
@@ -363,7 +363,7 @@
initialize_real_mode = 1;
cr0 = get_cr0();
-#ifndef TEST
+#ifdef TEST
set_cr0(cr0 | CR0_PE);
#endif
set_cr0(cr0 & ~CR0_PE);
2007 Apr 18
1
[PATCH 2/2] Transparent privilege levels in entry.S
...X_SS, %ax; \
jne 28f; \
- movl $__KERNEL_DS, %edx; \
+ movl $__USER_DS, %edx; \
movl %edx, %ds; \
movl %edx, %es; \
/* switch to 32bit stack */ \
@@ -500,7 +502,7 @@ ENTRY(simd_coprocessor_error)
ENTRY(device_not_available)
pushl $-1 # mark this as an int
SAVE_ALL
- movl %cr0, %eax
+ GET_CR0
testl $0x4, %eax # EM (math emulation bit)
jne device_not_available_emulate
preempt_stop
@@ -557,6 +559,7 @@ debug_stack_correct:
ENTRY(nmi)
pushl %eax
movl %ss, %eax
+ MASK_RPL(%ax)
cmpw $__ESPFIX_SS, %ax
popl %eax
je nmi_16bit_stack
Index: linux-2.6.13/include/asm-i386/mach-defa...
2007 Apr 18
1
[PATCH 1/2] Transparent entry.S IRQ holdoff handling
...==================================
--- linux-2.6.13.orig/include/asm-i386/mach-default/mach_asm.h 2005-08-16 14:12:22.000000000 -0700
+++ linux-2.6.13/include/asm-i386/mach-default/mach_asm.h 2005-08-17 11:17:09.000000000 -0700
@@ -0,0 +1,11 @@
+#ifndef __MACH_ASM_H
+#define __MACH_ASM_H
+
+#define GET_CR0 mov %cr0, %eax
+#define IRET iret
+#define IRET16 iret
+#define CLI cli
+#define STI sti
+#define STI_SYSEXIT sti; sysexit
+
+#endif
2007 Apr 18
2
[PATCH 1/3] Paravirtualization: Kernel Ring Cleanups
Hi all,
I've been looking at finding common ground between the VMI, Xen and
other paravirtualization approaches, and after some discussion, we're
getting somewhere.
These first two patches are the fundamentals, stolen mainly from the
VMI patches: removing assumptions about the kernel running in ring 0,
and macro-izing all the obvious para-virtualize-needing insns. The
third patch is
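The macro-izing described above is the same pattern visible in the other results on this page: entry.S switches from raw privileged instructions (sti, iret, mov %cr0, %eax) to symbolic names (STI, IRET, GET_CR0), and a per-backend header supplies the definitions. A minimal sketch of that split, assuming a hypothetical hypervisor backend with made-up CONFIG_HV_GUEST and hv_* names; only the native half is taken from the mach_asm.h hunk above:

#ifndef __MACH_ASM_H
#define __MACH_ASM_H

#ifndef CONFIG_HV_GUEST                /* native: names map 1:1 to the insn */
#define GET_CR0 mov %cr0, %eax        /* read CR0 into %eax                 */
#define CLI     cli                   /* disable interrupts                 */
#define STI     sti                   /* enable interrupts                  */
#define IRET    iret                  /* return from exception/interrupt    */
#else                                  /* hypothetical guest port            */
#define GET_CR0 call hv_read_cr0      /* hv_* entry points are illustrative */
#define CLI     call hv_irq_disable
#define STI     call hv_irq_enable
#define IRET    jmp hv_iret
#endif

#endif /* __MACH_ASM_H */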
2007 Apr 18
1
[PATCH] (with benchmarks) binary patching of paravirt_ops call sites
...*paravirt_ops+PARAVIRT_irq_disable; popl %edx; popl %ecx; popl %eax)
+#define STI PARA_PATCH(PARAVIRT_IRQ_ENABLE,pushl %eax; pushl %ecx; pushl %edx; call *paravirt_ops+PARAVIRT_irq_enable; popl %edx; popl %ecx; popl %eax)
#define STI_SYSEXIT jmp *paravirt_ops+PARAVIRT_irq_enable_sysexit
#define GET_CR0 pushl %ecx; pushl %edx; call *paravirt_ops+PARAVIRT_read_cr0; popl %edx; popl %ecx
@@ -204,9 +283,6 @@ static inline void sync_core(void)
popl %eax
#endif /* __ASSEMBLY__ */
-#define CLI_STRING "pushl %eax; pushl %ecx; pushl %edx; call *paravirt_ops+PARAVIRT_irq_disable; popl %edx; po...
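With these definitions every CLI/STI/GET_CR0 site becomes a register save, an indirect call through paravirt_ops, and a register restore, which costs noticeably more than the single native instruction. The PARA_PATCH wrapper visible in the STI definition is what makes the binary patching in the subject line possible: it presumably records the call site and its type so the kernel can rewrite the site at boot. A rough sketch of the idea in C; the structure layout and function names here are assumptions for illustration, not the patch's actual interface:

#include <string.h>

/* One record per patchable call site (layout is hypothetical). */
struct paravirt_patch_site {
	unsigned char *site;   /* start of the push/call/pop sequence */
	unsigned char type;    /* e.g. PARAVIRT_IRQ_ENABLE            */
	unsigned char len;     /* bytes reserved at the site          */
};

/* Backend hook (hypothetical): emit up to 'len' bytes of replacement
 * code at 'site' -- a bare sti/cli/mov %cr0,%eax on native hardware --
 * and return how many bytes were written. */
unsigned paravirt_patch_insn(unsigned char type, unsigned char *site,
			     unsigned len);

static void apply_paravirt_patches(struct paravirt_patch_site *start,
				   struct paravirt_patch_site *end)
{
	struct paravirt_patch_site *p;

	for (p = start; p < end; p++) {
		unsigned used = paravirt_patch_insn(p->type, p->site, p->len);
		/* NOP-pad the rest of the original call sequence so the
		 * instruction stream stays well formed. */
		memset(p->site + used, 0x90, p->len - used);
	}
}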
2007 Apr 18
3
[RFC, PATCH 4/24] i386 Vmi inline implementation
...+#define STI_SYSEXIT vmi_raw_call(VMI_CALL_SYSEXIT, sti; sysexit)
+
+/*
+ * Due to the presence of "," in the instruction, and the use of
+ * -traditional to compile entry.S, we can not use a macro to
+ * encapsulate (mov %cr0, %eax); the full expansion must be
+ * written.
+ */
+#define GET_CR0 vmi_native_start; \
+ mov %cr0, %eax; \
+ vmi_native_finish; \
+ vmi_translation_start; \
+ vmi_call(VMI_CALL_GetCR0); \
+ vmi_translation_finish; \
+ vmi_padded_start; \
+ mov %cr0, %eax; \
+ vmi_nop_pad; \
+ vmi_padded_finish; \
+ vmi_annotate(VMI_CALL_GetCR0);
+...
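The comment in this hunk is about a preprocessor limitation rather than VMI itself: entry.S is run through cpp with -traditional, which has no variadic macros, so an instruction whose text contains a bare comma cannot be passed as a single macro argument. A contrived illustration, with a wrapper name made up for the example:

/* Hypothetical one-argument wrapper -- not part of the patch. */
#define VMI_NATIVE_WRAP(insn) vmi_native_start; insn; vmi_native_finish

VMI_NATIVE_WRAP(sti)              /* fine: no comma in the argument      */
VMI_NATIVE_WRAP(mov %cr0, %eax)   /* broken: the comma splits this into
                                     two arguments ("mov %cr0", "%eax"),
                                     which is why GET_CR0 above spells
                                     out the annotated sequence by hand */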