Rebased cleanup, based on Qing's previous nested patch series. Thx, Eddie

Unify the CPU_BASED_VM_EXEC_CONTROL VMCS field write into a single place, so
that the additional control required by nested VMX can be applied there.

Signed-off-by: Qing He <qing.he@intel.com>
Signed-off-by: Eddie Dong <eddie.dong@intel.com>

diff -r b7c16de20715 xen/arch/x86/hvm/vmx/intr.c
--- a/xen/arch/x86/hvm/vmx/intr.c	Mon Sep 13 11:00:30 2010 +0800
+++ b/xen/arch/x86/hvm/vmx/intr.c	Mon Sep 13 11:00:43 2010 +0800
@@ -69,7 +69,6 @@
 
 static void enable_intr_window(struct vcpu *v, struct hvm_intack intack)
 {
-    u32 *cpu_exec_control = &v->arch.hvm_vmx.exec_control;
     u32 ctl = CPU_BASED_VIRTUAL_INTR_PENDING;
 
     ASSERT(intack.source != hvm_intsrc_none);
@@ -103,10 +102,10 @@ static void enable_intr_window(struct vc
         ctl = CPU_BASED_VIRTUAL_NMI_PENDING;
     }
 
-    if ( !(*cpu_exec_control & ctl) )
+    if ( !(v->arch.hvm_vmx.exec_control & ctl) )
     {
-        *cpu_exec_control |= ctl;
-        __vmwrite(CPU_BASED_VM_EXEC_CONTROL, *cpu_exec_control);
+        v->arch.hvm_vmx.exec_control |= ctl;
+        vmx_update_cpu_exec_control(v);
     }
 }
 
@@ -121,7 +120,7 @@ asmlinkage void vmx_intr_assist(void)
     if ( unlikely(v->arch.hvm_vcpu.single_step) )
     {
         v->arch.hvm_vmx.exec_control |= CPU_BASED_MONITOR_TRAP_FLAG;
-        __vmwrite(CPU_BASED_VM_EXEC_CONTROL, v->arch.hvm_vmx.exec_control);
+        vmx_update_cpu_exec_control(v);
         return;
     }
 
diff -r b7c16de20715 xen/arch/x86/hvm/vmx/vmcs.c
--- a/xen/arch/x86/hvm/vmx/vmcs.c	Mon Sep 13 11:00:30 2010 +0800
+++ b/xen/arch/x86/hvm/vmx/vmcs.c	Mon Sep 13 11:00:43 2010 +0800
@@ -709,7 +709,7 @@ static int construct_vmcs(struct vcpu *v
     /* Do not enable Monitor Trap Flag unless start single step debug */
     v->arch.hvm_vmx.exec_control &= ~CPU_BASED_MONITOR_TRAP_FLAG;
 
-    __vmwrite(CPU_BASED_VM_EXEC_CONTROL, v->arch.hvm_vmx.exec_control);
+    vmx_update_cpu_exec_control(v);
     __vmwrite(VM_EXIT_CONTROLS, vmexit_ctl);
     __vmwrite(VM_ENTRY_CONTROLS, vmentry_ctl);
 
diff -r b7c16de20715 xen/arch/x86/hvm/vmx/vmx.c
--- a/xen/arch/x86/hvm/vmx/vmx.c	Mon Sep 13 11:00:30 2010 +0800
+++ b/xen/arch/x86/hvm/vmx/vmx.c	Mon Sep 13 11:01:26 2010 +0800
@@ -385,6 +385,11 @@ long_mode_do_msr_write(unsigned int msr,
 
 #endif /* __i386__ */
 
+void vmx_update_cpu_exec_control(struct vcpu *v)
+{
+    __vmwrite(CPU_BASED_VM_EXEC_CONTROL, v->arch.hvm_vmx.exec_control);
+}
+
 static void vmx_update_secondary_exec_control(struct vcpu *v)
 {
     __vmwrite(SECONDARY_VM_EXEC_CONTROL,
@@ -419,7 +424,7 @@ static void vmx_save_dr(struct vcpu *v)
     /* Clear the DR dirty flag and re-enable intercepts for DR accesses. */
     v->arch.hvm_vcpu.flag_dr_dirty = 0;
     v->arch.hvm_vmx.exec_control |= CPU_BASED_MOV_DR_EXITING;
-    __vmwrite(CPU_BASED_VM_EXEC_CONTROL, v->arch.hvm_vmx.exec_control);
+    vmx_update_cpu_exec_control(v);
 
     v->arch.guest_context.debugreg[0] = read_debugreg(0);
     v->arch.guest_context.debugreg[1] = read_debugreg(1);
@@ -968,7 +973,7 @@ static void vmx_set_rdtsc_exiting(struct
     v->arch.hvm_vmx.exec_control &= ~CPU_BASED_RDTSC_EXITING;
     if ( enable )
         v->arch.hvm_vmx.exec_control |= CPU_BASED_RDTSC_EXITING;
-    __vmwrite(CPU_BASED_VM_EXEC_CONTROL, v->arch.hvm_vmx.exec_control);
+    vmx_update_cpu_exec_control(v);
     vmx_vmcs_exit(v);
 }
 
@@ -1101,7 +1106,7 @@ static void vmx_update_guest_cr(struct v
             v->arch.hvm_vmx.exec_control &= ~cr3_ctls;
             if ( !hvm_paging_enabled(v) )
                 v->arch.hvm_vmx.exec_control |= cr3_ctls;
-            __vmwrite(CPU_BASED_VM_EXEC_CONTROL, v->arch.hvm_vmx.exec_control);
+            vmx_update_cpu_exec_control(v);
 
             /* Changing CR0.PE can change some bits in real CR4. */
             vmx_update_guest_cr(v, 4);
@@ -1559,7 +1564,7 @@ static void vmx_dr_access(unsigned long
 
     /* Allow guest direct access to DR registers */
    v->arch.hvm_vmx.exec_control &= ~CPU_BASED_MOV_DR_EXITING;
-    __vmwrite(CPU_BASED_VM_EXEC_CONTROL, v->arch.hvm_vmx.exec_control);
+    vmx_update_cpu_exec_control(v);
 }
 
 static void vmx_invlpg_intercept(unsigned long vaddr)
@@ -2481,14 +2486,12 @@ asmlinkage void vmx_vmexit_handler(struc
     case EXIT_REASON_PENDING_VIRT_INTR:
         /* Disable the interrupt window. */
         v->arch.hvm_vmx.exec_control &= ~CPU_BASED_VIRTUAL_INTR_PENDING;
-        __vmwrite(CPU_BASED_VM_EXEC_CONTROL,
-                  v->arch.hvm_vmx.exec_control);
+        vmx_update_cpu_exec_control(v);
         break;
     case EXIT_REASON_PENDING_VIRT_NMI:
         /* Disable the NMI window. */
         v->arch.hvm_vmx.exec_control &= ~CPU_BASED_VIRTUAL_NMI_PENDING;
-        __vmwrite(CPU_BASED_VM_EXEC_CONTROL,
-                  v->arch.hvm_vmx.exec_control);
+        vmx_update_cpu_exec_control(v);
         break;
     case EXIT_REASON_TASK_SWITCH: {
         const enum hvm_task_switch_reason reasons[] = {
@@ -2639,7 +2642,7 @@ asmlinkage void vmx_vmexit_handler(struc
 
     case EXIT_REASON_MONITOR_TRAP_FLAG:
         v->arch.hvm_vmx.exec_control &= ~CPU_BASED_MONITOR_TRAP_FLAG;
-        __vmwrite(CPU_BASED_VM_EXEC_CONTROL, v->arch.hvm_vmx.exec_control);
+        vmx_update_cpu_exec_control(v);
         if ( v->domain->debugger_attached && v->arch.hvm_vcpu.single_step )
             domain_pause_for_debugger();
         break;
diff -r b7c16de20715 xen/include/asm-x86/hvm/vmx/vmx.h
--- a/xen/include/asm-x86/hvm/vmx/vmx.h	Mon Sep 13 11:00:30 2010 +0800
+++ b/xen/include/asm-x86/hvm/vmx/vmx.h	Mon Sep 13 11:00:43 2010 +0800
@@ -61,6 +61,7 @@ void vmx_realmode(struct cpu_user_regs *
 void vmx_realmode(struct cpu_user_regs *regs);
 void vmx_update_debug_state(struct vcpu *v);
 void vmx_update_exception_bitmap(struct vcpu *v);
+void vmx_update_cpu_exec_control(struct vcpu *v);
 
 /*
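For readers outside the Xen tree who want to see the resulting call pattern in
isolation: after this patch, callers only modify the cached
v->arch.hvm_vmx.exec_control value and then invoke vmx_update_cpu_exec_control(),
which becomes the single place that writes CPU_BASED_VM_EXEC_CONTROL into the
VMCS (and can later be extended for nested VMX). Below is a minimal standalone
sketch of that pattern. The struct vcpu layout, the __vmwrite() stub and the
main() driver are simplified stand-ins invented for illustration; only the
helper's body and the enable_intr_window() logic mirror the hunks above.

#include <stdint.h>
#include <stdio.h>

/* Standard VMCS encoding / control bit, defined here only so the sketch
 * compiles on its own. */
#define CPU_BASED_VM_EXEC_CONTROL       0x00004002UL
#define CPU_BASED_VIRTUAL_INTR_PENDING  0x00000004U

/* Simplified stand-in for Xen's struct vcpu (the real field path is
 * v->arch.hvm_vmx.exec_control). */
struct vcpu {
    struct {
        uint32_t exec_control;
    } hvm_vmx;
};

/* Stand-in for Xen's __vmwrite(): the real one executes VMWRITE. */
static void __vmwrite(unsigned long field, unsigned long value)
{
    printf("VMWRITE field %#lx <- %#lx\n", field, value);
}

/* The single VMCS write site introduced by the patch. */
static void vmx_update_cpu_exec_control(struct vcpu *v)
{
    __vmwrite(CPU_BASED_VM_EXEC_CONTROL, v->hvm_vmx.exec_control);
}

/* Example caller, mirroring enable_intr_window() after the patch:
 * update the cached value, then let the helper push it out. */
static void enable_intr_window(struct vcpu *v, uint32_t ctl)
{
    if ( !(v->hvm_vmx.exec_control & ctl) )
    {
        v->hvm_vmx.exec_control |= ctl;
        vmx_update_cpu_exec_control(v);
    }
}

int main(void)
{
    struct vcpu v = { .hvm_vmx = { .exec_control = 0 } };

    enable_intr_window(&v, CPU_BASED_VIRTUAL_INTR_PENDING);
    return 0;
}

The point of the indirection is that every exec-control update now funnels
through one function, so a later nested VMX patch only has to adjust that
function rather than every __vmwrite() call site.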