Li, Jiongxi
2013-Jan-29 05:56 UTC
[PATCH v4 2/2] Xen: Fix VMCS setting for x2APIC mode guest while enabling APICV
The "APIC-register virtualization" and "virtual-interrupt deliver" VM-execution control has no effect on the behavior of RDMSR/WRMSR if the "virtualize x2APIC mode" VM-execution control is 0. When guest uses x2APIC mode, we should enable "virtualize x2APIC mode" for APICV first. Signed-off-by: Jiongxi Li <jiongxi.li@intel.com> diff --git a/xen/arch/x86/hvm/vmx/vmcs.c b/xen/arch/x86/hvm/vmx/vmcs.c index de22e03..4807eb2 100644 --- a/xen/arch/x86/hvm/vmx/vmcs.c +++ b/xen/arch/x86/hvm/vmx/vmcs.c @@ -190,7 +190,8 @@ static int vmx_init_vmcs_config(void) */ if ( _vmx_cpu_based_exec_control & CPU_BASED_TPR_SHADOW ) opt |= SECONDARY_EXEC_APIC_REGISTER_VIRT | - SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY; + SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY | + SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE; _vmx_secondary_exec_control = adjust_vmx_controls( @@ -659,17 +660,47 @@ void vmx_disable_intercept_for_msr(struct vcpu *v, u32 msr, int type) if ( msr <= 0x1fff ) { if (type & MSR_TYPE_R) - __clear_bit(msr, msr_bitmap + 0x000/BYTES_PER_LONG); /* read-low */ + clear_bit(msr, msr_bitmap + 0x000/BYTES_PER_LONG); /* read-low */ if (type & MSR_TYPE_W) - __clear_bit(msr, msr_bitmap + 0x800/BYTES_PER_LONG); /* write-low */ + clear_bit(msr, msr_bitmap + 0x800/BYTES_PER_LONG); /* write-low */ } else if ( (msr >= 0xc0000000) && (msr <= 0xc0001fff) ) { msr &= 0x1fff; if (type & MSR_TYPE_R) - __clear_bit(msr, msr_bitmap + 0x400/BYTES_PER_LONG); /* read-high */ + clear_bit(msr, msr_bitmap + 0x400/BYTES_PER_LONG); /* read-high */ if (type & MSR_TYPE_W) - __clear_bit(msr, msr_bitmap + 0xc00/BYTES_PER_LONG); /* write-high */ + clear_bit(msr, msr_bitmap + 0xc00/BYTES_PER_LONG); /* write-high */ + } +} + +void vmx_enable_intercept_for_msr(struct vcpu *v, u32 msr, int type) +{ + unsigned long *msr_bitmap = v->arch.hvm_vmx.msr_bitmap; + + /* VMX MSR bitmap supported? */ + if ( msr_bitmap == NULL ) + return; + + /* + * See Intel PRM Vol. 3, 20.6.9 (MSR-Bitmap Address). 
Early manuals + * have the write-low and read-high bitmap offsets the wrong way round. + * We can control MSRs 0x00000000-0x00001fff and 0xc0000000-0xc0001fff. + */ + if ( msr <= 0x1fff ) + { + if (type & MSR_TYPE_R) + set_bit(msr, msr_bitmap + 0x000/BYTES_PER_LONG); /* read-low */ + if (type & MSR_TYPE_W) + set_bit(msr, msr_bitmap + 0x800/BYTES_PER_LONG); /* write-low */ + } + else if ( (msr >= 0xc0000000) && (msr <= 0xc0001fff) ) + { + msr &= 0x1fff; + if (type & MSR_TYPE_R) + set_bit(msr, msr_bitmap + 0x400/BYTES_PER_LONG); /* read-high */ + if (type & MSR_TYPE_W) + set_bit(msr, msr_bitmap + 0xc00/BYTES_PER_LONG); /* write-high */ } } @@ -764,6 +795,9 @@ static int construct_vmcs(struct vcpu *v) vmentry_ctl &= ~VM_ENTRY_LOAD_GUEST_PAT; } + /* Disable Virtualize x2APIC mode by default. */ + v->arch.hvm_vmx.secondary_exec_control &= ~SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE; + /* Do not enable Monitor Trap Flag unless start single step debug */ v->arch.hvm_vmx.exec_control &= ~CPU_BASED_MONITOR_TRAP_FLAG; @@ -800,18 +834,6 @@ static int construct_vmcs(struct vcpu *v) vmx_disable_intercept_for_msr(v, MSR_IA32_SYSENTER_EIP, MSR_TYPE_R | MSR_TYPE_W); if ( cpu_has_vmx_pat && paging_mode_hap(d) ) vmx_disable_intercept_for_msr(v, MSR_IA32_CR_PAT, MSR_TYPE_R | MSR_TYPE_W); - if ( cpu_has_vmx_apic_reg_virt ) - { - int msr; - for (msr = MSR_IA32_APICBASE_MSR; msr <= MSR_IA32_APICBASE_MSR + 0xff; msr++) - vmx_disable_intercept_for_msr(v, msr, MSR_TYPE_R); - } - if ( cpu_has_vmx_virtual_intr_delivery ) - { - vmx_disable_intercept_for_msr(v, MSR_IA32_APICTPR_MSR, MSR_TYPE_W); - vmx_disable_intercept_for_msr(v, MSR_IA32_APICEOI_MSR, MSR_TYPE_W); - vmx_disable_intercept_for_msr(v, MSR_IA32_APICSELF_MSR, MSR_TYPE_W); - } } /* I/O access bitmap. 
 */ diff --git a/xen/arch/x86/hvm/vmx/vmx.c b/xen/arch/x86/hvm/vmx/vmx.c index 43ed36c..53e048e 100644 --- a/xen/arch/x86/hvm/vmx/vmx.c +++ b/xen/arch/x86/hvm/vmx/vmx.c @@ -1886,18 +1886,58 @@ static void vmx_install_vlapic_mapping(struct vcpu *v) void vmx_vlapic_msr_changed(struct vcpu *v) { + int virtualize_x2apic_mode; struct vlapic *vlapic = vcpu_vlapic(v); - if ( !cpu_has_vmx_virtualize_apic_accesses ) + virtualize_x2apic_mode = ( (cpu_has_vmx_apic_reg_virt || + cpu_has_vmx_virtual_intr_delivery) && + cpu_has_vmx_virtualize_x2apic_mode ); + + if ( !cpu_has_vmx_virtualize_apic_accesses && + !virtualize_x2apic_mode ) return; vmx_vmcs_enter(v); v->arch.hvm_vmx.secondary_exec_control &= - ~SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES; + ~(SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES | + SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE); if ( !vlapic_hw_disabled(vlapic) && (vlapic_base_address(vlapic) == APIC_DEFAULT_PHYS_BASE) ) - v->arch.hvm_vmx.secondary_exec_control |= - SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES; + { + if ( virtualize_x2apic_mode && + vlapic_x2apic_mode(vlapic) ) + { + v->arch.hvm_vmx.secondary_exec_control |= + SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE; + if ( cpu_has_vmx_apic_reg_virt ) + { + for (int msr = MSR_IA32_APICBASE_MSR; msr <= MSR_IA32_APICBASE_MSR + 0xff; msr++) + vmx_disable_intercept_for_msr(v, msr, MSR_TYPE_R); + + vmx_enable_intercept_for_msr(v, MSR_IA32_APICPPR_MSR, MSR_TYPE_R); + vmx_enable_intercept_for_msr(v, MSR_IA32_APICTMICT_MSR, MSR_TYPE_R); + vmx_enable_intercept_for_msr(v, MSR_IA32_APICTMCCT_MSR, MSR_TYPE_R); + } + if ( cpu_has_vmx_virtual_intr_delivery ) + { + vmx_disable_intercept_for_msr(v, MSR_IA32_APICTPR_MSR, MSR_TYPE_W); + vmx_disable_intercept_for_msr(v, MSR_IA32_APICEOI_MSR, MSR_TYPE_W); + vmx_disable_intercept_for_msr(v, MSR_IA32_APICSELF_MSR, MSR_TYPE_W); + } + } + else + { + v->arch.hvm_vmx.secondary_exec_control |= + SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES; + for (int msr = MSR_IA32_APICBASE_MSR; msr <= MSR_IA32_APICBASE_MSR + 0xff; msr++) 
+ vmx_enable_intercept_for_msr(v, msr, MSR_TYPE_R); + + vmx_enable_intercept_for_msr(v, MSR_IA32_APICTPR_MSR, MSR_TYPE_W); + vmx_enable_intercept_for_msr(v, MSR_IA32_APICEOI_MSR, MSR_TYPE_W); + vmx_enable_intercept_for_msr(v, MSR_IA32_APICSELF_MSR, MSR_TYPE_W); + + } + } vmx_update_secondary_exec_control(v); vmx_vmcs_exit(v); } diff --git a/xen/include/asm-x86/hvm/vmx/vmcs.h b/xen/include/asm-x86/hvm/vmx/vmcs.h index d4958c3..25c94a6 100644 --- a/xen/include/asm-x86/hvm/vmx/vmcs.h +++ b/xen/include/asm-x86/hvm/vmx/vmcs.h @@ -184,6 +184,7 @@ extern u32 vmx_vmentry_control; #define SECONDARY_EXEC_ENABLE_EPT 0x00000002 #define SECONDARY_EXEC_DESCRIPTOR_TABLE_EXITING 0x00000004 #define SECONDARY_EXEC_ENABLE_RDTSCP 0x00000008 +#define SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE 0x00000010 #define SECONDARY_EXEC_ENABLE_VPID 0x00000020 #define SECONDARY_EXEC_WBINVD_EXITING 0x00000040 #define SECONDARY_EXEC_UNRESTRICTED_GUEST 0x00000080 @@ -244,6 +245,8 @@ extern bool_t cpu_has_vmx_ins_outs_instr_info; (vmx_secondary_exec_control & SECONDARY_EXEC_APIC_REGISTER_VIRT) #define cpu_has_vmx_virtual_intr_delivery \ (vmx_secondary_exec_control & SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY) +#define cpu_has_vmx_virtualize_x2apic_mode \ + (vmx_secondary_exec_control & SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE) /* GUEST_INTERRUPTIBILITY_INFO flags. 
*/ #define VMX_INTR_SHADOW_STI 0x00000001 @@ -434,6 +437,7 @@ enum vmcs_field { #define MSR_TYPE_R 1 #define MSR_TYPE_W 2 void vmx_disable_intercept_for_msr(struct vcpu *v, u32 msr, int type); +void vmx_enable_intercept_for_msr(struct vcpu *v, u32 msr, int type); int vmx_read_guest_msr(u32 msr, u64 *val); int vmx_write_guest_msr(u32 msr, u64 val); int vmx_add_guest_msr(u32 msr); diff --git a/xen/include/asm-x86/msr-index.h b/xen/include/asm-x86/msr-index.h index 5c1de6e..f500efd 100644 --- a/xen/include/asm-x86/msr-index.h +++ b/xen/include/asm-x86/msr-index.h @@ -300,7 +300,10 @@ #define MSR_IA32_APICBASE_BASE (0xfffff<<12) #define MSR_IA32_APICBASE_MSR 0x800 #define MSR_IA32_APICTPR_MSR 0x808 +#define MSR_IA32_APICPPR_MSR 0x80a #define MSR_IA32_APICEOI_MSR 0x80b +#define MSR_IA32_APICTMICT_MSR 0x838 +#define MSR_IA32_APICTMCCT_MSR 0x839 #define MSR_IA32_APICSELF_MSR 0x83f #define MSR_IA32_UCODE_WRITE 0x00000079 -- 1.7.1
Jan Beulich
2013-Jan-29 09:07 UTC
Re: [PATCH v4 2/2] Xen: Fix VMCS setting for x2APIC mode guest while enabling APICV
>>> On 29.01.13 at 06:56, "Li, Jiongxi" <jiongxi.li@intel.com> wrote: > +void vmx_enable_intercept_for_msr(struct vcpu *v, u32 msr, int type) > +{ > + unsigned long *msr_bitmap = v->arch.hvm_vmx.msr_bitmap; > + > + /* VMX MSR bitmap supported? */ > + if ( msr_bitmap == NULL ) > + return; > + > + /* > + * See Intel PRM Vol. 3, 20.6.9 (MSR-Bitmap Address). Early manuals > + * have the write-low and read-high bitmap offsets the wrong way round. > + * We can control MSRs 0x00000000-0x00001fff and 0xc0000000-0xc0001fff. > + */ > + if ( msr <= 0x1fff ) > + { > + if (type & MSR_TYPE_R) Missing blanks again. I realize that you just cloned vmx_disable_intercept_for_msr(), but you shouldn't repeat mistakes made in the original (on the opposite: since you have to modify the original anyway, you may feel free to adjust the coding convention violation there too).> + set_bit(msr, msr_bitmap + 0x000/BYTES_PER_LONG); /* read-low */ > + if (type & MSR_TYPE_W) > + set_bit(msr, msr_bitmap + 0x800/BYTES_PER_LONG); /* write-low */ > + } > + else if ( (msr >= 0xc0000000) && (msr <= 0xc0001fff) ) > + { > + msr &= 0x1fff; > + if (type & MSR_TYPE_R) > + set_bit(msr, msr_bitmap + 0x400/BYTES_PER_LONG); /* read-high */ > + if (type & MSR_TYPE_W) > + set_bit(msr, msr_bitmap + 0xc00/BYTES_PER_LONG); /* write-high */ I believe that while the corresponding disable function is fine in this regard, here you need to do something in a final "else": A disable not having any effect is fine (we'll still get the intercept), but an enable not having any effect is a problem. 
So I'd suggest adding a one-time warning and/or ASSERT(0) there.> if ( !vlapic_hw_disabled(vlapic) && > (vlapic_base_address(vlapic) == APIC_DEFAULT_PHYS_BASE) ) > - v->arch.hvm_vmx.secondary_exec_control |= > - SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES; > + { > + if ( virtualize_x2apic_mode && > + vlapic_x2apic_mode(vlapic) ) While this easily fits on one line, ...> + { > + v->arch.hvm_vmx.secondary_exec_control |= > + SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE; > + if ( cpu_has_vmx_apic_reg_virt ) > + { > + for (int msr = MSR_IA32_APICBASE_MSR; msr <= MSR_IA32_APICBASE_MSR + 0xff; msr++) ... long lines like this ...> + vmx_disable_intercept_for_msr(v, msr, MSR_TYPE_R); > + > + vmx_enable_intercept_for_msr(v, MSR_IA32_APICPPR_MSR, MSR_TYPE_R); > + vmx_enable_intercept_for_msr(v, MSR_IA32_APICTMICT_MSR, MSR_TYPE_R); > + vmx_enable_intercept_for_msr(v, MSR_IA32_APICTMCCT_MSR, MSR_TYPE_R); ... or these need to be broken up.> > + } > + if ( cpu_has_vmx_virtual_intr_delivery ) > + { > + vmx_disable_intercept_for_msr(v, MSR_IA32_APICTPR_MSR, MSR_TYPE_W); > + vmx_disable_intercept_for_msr(v, MSR_IA32_APICEOI_MSR, MSR_TYPE_W); > + vmx_disable_intercept_for_msr(v, MSR_IA32_APICSELF_MSR, MSR_TYPE_W); > + } > + } > + else > + { > + v->arch.hvm_vmx.secondary_exec_control |= > + SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES; > + for (int msr = MSR_IA32_APICBASE_MSR; msr <= MSR_IA32_APICBASE_MSR + 0xff; msr++) > + vmx_enable_intercept_for_msr(v, msr, MSR_TYPE_R); > + > + vmx_enable_intercept_for_msr(v, MSR_IA32_APICTPR_MSR, MSR_TYPE_W); > + vmx_enable_intercept_for_msr(v, MSR_IA32_APICEOI_MSR, MSR_TYPE_W); > + vmx_enable_intercept_for_msr(v, MSR_IA32_APICSELF_MSR, MSR_TYPE_W); Wouldn't it be more safe (especially towards future changes to the if() portion above) to simply enable all intercepts for read and write in the loop, rather than special casing the three ones that _currently_ get their write intercepts disabled above? Jan
Li, Jiongxi
2013-Jan-29 15:14 UTC
Re: [PATCH v4 2/2] Xen: Fix VMCS setting for x2APIC mode guest while enabling APICV
> -----Original Message----- > From: Jan Beulich [mailto:JBeulich@suse.com] > Sent: Tuesday, January 29, 2013 5:08 PM > To: Li, Jiongxi > Cc: Keir Fraser; xen-devel > Subject: Re: [PATCH v4 2/2] Xen: Fix VMCS setting for x2APIC mode guest while > enabling APICV > > >>> On 29.01.13 at 06:56, "Li, Jiongxi" <jiongxi.li@intel.com> wrote: > > +void vmx_enable_intercept_for_msr(struct vcpu *v, u32 msr, int type) > > +{ > > + unsigned long *msr_bitmap = v->arch.hvm_vmx.msr_bitmap; > > + > > + /* VMX MSR bitmap supported? */ > > + if ( msr_bitmap == NULL ) > > + return; > > + > > + /* > > + * See Intel PRM Vol. 3, 20.6.9 (MSR-Bitmap Address). Early manuals > > + * have the write-low and read-high bitmap offsets the wrong way > round. > > + * We can control MSRs 0x00000000-0x00001fff and > 0xc0000000-0xc0001fff. > > + */ > > + if ( msr <= 0x1fff ) > > + { > > + if (type & MSR_TYPE_R) > > Missing blanks again. I realize that you just cloned > vmx_disable_intercept_for_msr(), but you shouldn''t repeat > mistakes made in the original (on the opposite: since you have > to modify the original anyway, you may feel free to adjust > the coding convention violation there too). > > > + set_bit(msr, msr_bitmap + 0x000/BYTES_PER_LONG); /* > read-low */ > > + if (type & MSR_TYPE_W) > > + set_bit(msr, msr_bitmap + 0x800/BYTES_PER_LONG); /* > write-low */ > > + } > > + else if ( (msr >= 0xc0000000) && (msr <= 0xc0001fff) ) > > + { > > + msr &= 0x1fff; > > + if (type & MSR_TYPE_R) > > + set_bit(msr, msr_bitmap + 0x400/BYTES_PER_LONG); /* > read-high */ > > + if (type & MSR_TYPE_W) > > + set_bit(msr, msr_bitmap + 0xc00/BYTES_PER_LONG); /* > write-high */ > > I believe that while the corresponding disable function is fine in > this regard, here you need to do something in a final "else": > A disable not having any effect is fine (we''ll still get the > intercept), but an enable not having any effect is a problem. 
So > I''d suggest adding a one-time warning and/or ASSERT(0) there.A final "else" means out of the ranges 00000000H - 00001FFFH and C0000000H - C0001FFFH. According to SDM, RDMSR and WRMSR out of the ranges will cause a VM exit, it is just what we want for "enable intercept" function, right?. So is it necessary to handle "else" case here?> > > if ( !vlapic_hw_disabled(vlapic) && > > (vlapic_base_address(vlapic) == APIC_DEFAULT_PHYS_BASE) ) > > - v->arch.hvm_vmx.secondary_exec_control |> > - SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES; > > + { > > + if ( virtualize_x2apic_mode && > > + vlapic_x2apic_mode(vlapic) ) > > While this easily fits on one line, ... > > > + { > > + v->arch.hvm_vmx.secondary_exec_control |> > + SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE; > > + if ( cpu_has_vmx_apic_reg_virt ) > > + { > > + for (int msr = MSR_IA32_APICBASE_MSR; msr <> MSR_IA32_APICBASE_MSR + 0xff; msr++) > > ... long lines like this ... > > > + vmx_disable_intercept_for_msr(v, msr, > MSR_TYPE_R); > > + > > + vmx_enable_intercept_for_msr(v, > MSR_IA32_APICPPR_MSR, MSR_TYPE_R); > > + vmx_enable_intercept_for_msr(v, > MSR_IA32_APICTMICT_MSR, MSR_TYPE_R); > > + vmx_enable_intercept_for_msr(v, > MSR_IA32_APICTMCCT_MSR, MSR_TYPE_R); > > ... or these need to be broken up. 
> > > + } > > + if ( cpu_has_vmx_virtual_intr_delivery ) > > + { > > + vmx_disable_intercept_for_msr(v, > MSR_IA32_APICTPR_MSR, MSR_TYPE_W); > > + vmx_disable_intercept_for_msr(v, > MSR_IA32_APICEOI_MSR, MSR_TYPE_W); > > + vmx_disable_intercept_for_msr(v, > MSR_IA32_APICSELF_MSR, MSR_TYPE_W); > > + } > > + } > > + else > > + { > > + v->arch.hvm_vmx.secondary_exec_control |> > + SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES; > > + for (int msr = MSR_IA32_APICBASE_MSR; msr <> MSR_IA32_APICBASE_MSR + 0xff; msr++) > > + vmx_enable_intercept_for_msr(v, msr, MSR_TYPE_R); > > + > > + vmx_enable_intercept_for_msr(v, MSR_IA32_APICTPR_MSR, > MSR_TYPE_W); > > + vmx_enable_intercept_for_msr(v, MSR_IA32_APICEOI_MSR, > MSR_TYPE_W); > > + vmx_enable_intercept_for_msr(v, MSR_IA32_APICSELF_MSR, > MSR_TYPE_W); > > Wouldn''t it be more safe (especially towards future changes to > the if() portion above) to simply enable all intercepts for read > and write in the loop, rather than special casing the three ones > that _currently_ get their write intercepts disabled above? > > Jan
Jan Beulich
2013-Jan-29 15:28 UTC
Re: [PATCH v4 2/2] Xen: Fix VMCS setting for x2APIC mode guest while enabling APICV
>>> On 29.01.13 at 16:14, "Li, Jiongxi" <jiongxi.li@intel.com> wrote: >> > + set_bit(msr, msr_bitmap + 0x000/BYTES_PER_LONG); /* >> read-low */ >> > + if (type & MSR_TYPE_W) >> > + set_bit(msr, msr_bitmap + 0x800/BYTES_PER_LONG); /* >> write-low */ >> > + } >> > + else if ( (msr >= 0xc0000000) && (msr <= 0xc0001fff) ) >> > + { >> > + msr &= 0x1fff; >> > + if (type & MSR_TYPE_R) >> > + set_bit(msr, msr_bitmap + 0x400/BYTES_PER_LONG); /* >> read-high */ >> > + if (type & MSR_TYPE_W) >> > + set_bit(msr, msr_bitmap + 0xc00/BYTES_PER_LONG); /* >> write-high */ >> >> I believe that while the corresponding disable function is fine in >> this regard, here you need to do something in a final "else": >> A disable not having any effect is fine (we'll still get the >> intercept), but an enable not having any effect is a problem. So >> I'd suggest adding a one-time warning and/or ASSERT(0) there. > > A final "else" means out of the ranges 00000000H - 00001FFFH and > C0000000H - C0001FFFH. According to SDM, RDMSR and WRMSR > out of the ranges will cause a VM exit, it is just what we want for > "enable intercept" function, right? So is it necessary to handle > "else" case here? Then maybe it doesn't make that much sense. I just wondered how one would get notified of having tried to pass an out of range MSR index to one of these functions. Right now, you'd have to go hunt for this when - at least in a debug build - you could be pointed right at the wrong invocation by a stack trace. Jan
Maybe Matching Threads
- [RFC PATCH v7 18/78] KVM: vmx: pass struct kvm_vcpu to the intercept msr related functions
- [PATCH v9 19/84] KVM: vmx: pass struct kvm_vcpu to the intercept msr related functions
- [RFC PATCH v7 00/78] VM introspection
- [PATCH v9 00/84] VM introspection
- [RFC PATCH v1 06/34] KVM: x86: mmu: add support for EPT switching