Clean up mov to CR4 handling.

Signed-off-by: Arun Sharma <arun.sharma@intel.com>

diff -r 4eee27d2c609 xen/arch/x86/vmx.c
--- a/xen/arch/x86/vmx.c	Wed Jul 13 21:36:27 2005
+++ b/xen/arch/x86/vmx.c	Wed Jul 13 16:02:04 2005
@@ -801,11 +801,7 @@
 skip_cr3:
 
     error |= __vmread(CR4_READ_SHADOW, &old_cr4);
-#if defined (__i386__)
-    error |= __vmwrite(GUEST_CR4, (c->cr4 | X86_CR4_VMXE));
-#else
-    error |= __vmwrite(GUEST_CR4, (c->cr4 | X86_CR4_VMXE | X86_CR4_PAE));
-#endif
+    error |= __vmwrite(GUEST_CR4, (c->cr4 | VMX_CR4_HOST_MASK));
     error |= __vmwrite(CR4_READ_SHADOW, c->cr4);
 
     error |= __vmwrite(GUEST_IDTR_LIMIT, c->idtr_limit);
@@ -1178,13 +1174,10 @@
     {
         /* CR4 */
         unsigned long old_guest_cr;
-        unsigned long pae_disabled = 0;
 
         __vmread(GUEST_CR4, &old_guest_cr);
         if (value & X86_CR4_PAE){
             set_bit(VMX_CPU_STATE_PAE_ENABLED, &d->arch.arch_vmx.cpu_state);
-            if(!vmx_paging_enabled(d))
-                pae_disabled = 1;
         } else {
             if (test_bit(VMX_CPU_STATE_LMA_ENABLED,
                          &d->arch.arch_vmx.cpu_state)){
@@ -1194,11 +1187,8 @@
         }
 
         __vmread(CR4_READ_SHADOW, &old_cr);
-        if (pae_disabled)
-            __vmwrite(GUEST_CR4, value| X86_CR4_VMXE);
-        else
-            __vmwrite(GUEST_CR4, value| X86_CR4_VMXE);
-
+
+        __vmwrite(GUEST_CR4, value | VMX_CR4_HOST_MASK);
         __vmwrite(CR4_READ_SHADOW, value);
 
         /*
diff -r 4eee27d2c609 xen/include/asm-x86/vmx.h
--- a/xen/include/asm-x86/vmx.h	Wed Jul 13 21:36:27 2005
+++ b/xen/include/asm-x86/vmx.h	Wed Jul 13 16:02:04 2005
@@ -183,6 +183,13 @@
                              EXCEPTION_BITMAP_GP )
 #endif
 
+/* These bits in CR4 are owned by the host */
+#ifdef __i386__
+#define VMX_CR4_HOST_MASK (X86_CR4_VMXE)
+#else
+#define VMX_CR4_HOST_MASK (X86_CR4_VMXE | X86_CR4_PAE)
+#endif
+
 #define VMCALL_OPCODE   ".byte 0x0f,0x01,0xc1\n"
 #define VMCLEAR_OPCODE  ".byte 0x66,0x0f,0xc7\n"        /* reg/opcode: /6 */
 #define VMLAUNCH_OPCODE ".byte 0x0f,0x01,0xc2\n"
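
For anyone reading along, here is a minimal, self-contained sketch of the
shadowing scheme this patch consolidates. The bit values match the x86
architectural CR4 layout, but the variables and handle_guest_cr4_write()
are illustrative stand-ins, not the hypervisor's API; the real code issues
__vmwrite() calls against the live VMCS. The point is simply that the bits
in VMX_CR4_HOST_MASK are forced on in the CR4 the CPU actually runs with,
while the guest reads back exactly the value it wrote:

    #include <stdio.h>

    #define X86_CR4_PAE   (1UL << 5)    /* Physical Address Extension */
    #define X86_CR4_VMXE  (1UL << 13)   /* VMX enable */

    /* Host-owned bits: VMXE always; PAE as well on x86_64, where the
     * hypervisor itself relies on PAE paging. */
    #ifdef __i386__
    #define VMX_CR4_HOST_MASK (X86_CR4_VMXE)
    #else
    #define VMX_CR4_HOST_MASK (X86_CR4_VMXE | X86_CR4_PAE)
    #endif

    static unsigned long guest_cr4;        /* stand-in for GUEST_CR4 */
    static unsigned long cr4_read_shadow;  /* stand-in for CR4_READ_SHADOW */

    /* On a guest mov to CR4: force the host-owned bits on in the value
     * the CPU really uses, but let the guest read back what it wrote. */
    static void handle_guest_cr4_write(unsigned long value)
    {
        guest_cr4       = value | VMX_CR4_HOST_MASK;
        cr4_read_shadow = value;
    }

    int main(void)
    {
        handle_guest_cr4_write(0x0);  /* guest tries to clear CR4 */
        printf("effective CR4: %#lx, guest-visible CR4: %#lx\n",
               guest_cr4, cr4_read_shadow);
        return 0;
    }

Keeping the mask in a single #define in vmx.h also means the 32-bit/64-bit
policy lives in one place instead of being repeated at every mov-to-CR4
site, which is what the removed #ifdef and the dead pae_disabled branch
were doing.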