Li, Jiongxi
2013-Jan-30 03:23 UTC
[PATCH v5 2/2] Xen: Fix VMCS setting for x2APIC mode guest while enabling APICV
The "APIC-register virtualization" and "virtual-interrupt
deliver"
VM-execution control has no effect on the behavior of RDMSR/WRMSR if
the "virtualize x2APIC mode" VM-execution control is 0.
When guest uses x2APIC mode, we should enable "virtualize x2APIC mode"
for APICV first.
Signed-off-by: Jiongxi Li <jiongxi.li@intel.com>
diff --git a/xen/arch/x86/hvm/vmx/vmcs.c b/xen/arch/x86/hvm/vmx/vmcs.c
index de22e03..8abdf6d 100644
--- a/xen/arch/x86/hvm/vmx/vmcs.c
+++ b/xen/arch/x86/hvm/vmx/vmcs.c
@@ -190,7 +190,8 @@ static int vmx_init_vmcs_config(void)
*/
if ( _vmx_cpu_based_exec_control & CPU_BASED_TPR_SHADOW )
opt |= SECONDARY_EXEC_APIC_REGISTER_VIRT |
- SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY;
+ SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY |
+ SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE;
_vmx_secondary_exec_control = adjust_vmx_controls(
@@ -658,19 +659,60 @@ void vmx_disable_intercept_for_msr(struct vcpu *v, u32
msr, int type)
*/
if ( msr <= 0x1fff )
{
- if (type & MSR_TYPE_R)
- __clear_bit(msr, msr_bitmap + 0x000/BYTES_PER_LONG); /* read-low */
- if (type & MSR_TYPE_W)
- __clear_bit(msr, msr_bitmap + 0x800/BYTES_PER_LONG); /* write-low
*/
+ if ( type & MSR_TYPE_R )
+ clear_bit(msr, msr_bitmap + 0x000/BYTES_PER_LONG); /* read-low */
+ if ( type & MSR_TYPE_W )
+ clear_bit(msr, msr_bitmap + 0x800/BYTES_PER_LONG); /* write-low */
}
else if ( (msr >= 0xc0000000) && (msr <= 0xc0001fff) )
{
msr &= 0x1fff;
- if (type & MSR_TYPE_R)
- __clear_bit(msr, msr_bitmap + 0x400/BYTES_PER_LONG); /* read-high
*/
- if (type & MSR_TYPE_W)
- __clear_bit(msr, msr_bitmap + 0xc00/BYTES_PER_LONG); /* write-high
*/
+ if ( type & MSR_TYPE_R )
+ clear_bit(msr, msr_bitmap + 0x400/BYTES_PER_LONG); /* read-high */
+ if ( type & MSR_TYPE_W )
+ clear_bit(msr, msr_bitmap + 0xc00/BYTES_PER_LONG); /* write-high */
}
+ else
+ HVM_DBG_LOG(DBG_LEVEL_0,
+ "msr %x is out of the control range"
+ "0x00000000-0x00001fff and 0xc0000000-0xc0001fff"
+ "RDMSR or WRMSR will cause a VM exit", msr);
+
+}
+
+void vmx_enable_intercept_for_msr(struct vcpu *v, u32 msr, int type)
+{
+ unsigned long *msr_bitmap = v->arch.hvm_vmx.msr_bitmap;
+
+ /* VMX MSR bitmap supported? */
+ if ( msr_bitmap == NULL )
+ return;
+
+ /*
+ * See Intel PRM Vol. 3, 20.6.9 (MSR-Bitmap Address). Early manuals
+ * have the write-low and read-high bitmap offsets the wrong way round.
+ * We can control MSRs 0x00000000-0x00001fff and 0xc0000000-0xc0001fff.
+ */
+ if ( msr <= 0x1fff )
+ {
+ if ( type & MSR_TYPE_R )
+ set_bit(msr, msr_bitmap + 0x000/BYTES_PER_LONG); /* read-low */
+ if ( type & MSR_TYPE_W )
+ set_bit(msr, msr_bitmap + 0x800/BYTES_PER_LONG); /* write-low */
+ }
+ else if ( (msr >= 0xc0000000) && (msr <= 0xc0001fff) )
+ {
+ msr &= 0x1fff;
+ if ( type & MSR_TYPE_R )
+ set_bit(msr, msr_bitmap + 0x400/BYTES_PER_LONG); /* read-high */
+ if ( type & MSR_TYPE_W )
+ set_bit(msr, msr_bitmap + 0xc00/BYTES_PER_LONG); /* write-high */
+ }
+ else
+ HVM_DBG_LOG(DBG_LEVEL_0,
+ "msr %x is out of the control range"
+ "0x00000000-0x00001fff and 0xc0000000-0xc0001fff"
+ "RDMSR or WRMSR will cause a VM exit", msr);
}
/*
@@ -764,6 +806,9 @@ static int construct_vmcs(struct vcpu *v)
vmentry_ctl &= ~VM_ENTRY_LOAD_GUEST_PAT;
}
+ /* Disable Virtualize x2APIC mode by default. */
+ v->arch.hvm_vmx.secondary_exec_control &=
~SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE;
+
/* Do not enable Monitor Trap Flag unless start single step debug */
v->arch.hvm_vmx.exec_control &= ~CPU_BASED_MONITOR_TRAP_FLAG;
@@ -800,18 +845,6 @@ static int construct_vmcs(struct vcpu *v)
vmx_disable_intercept_for_msr(v, MSR_IA32_SYSENTER_EIP, MSR_TYPE_R |
MSR_TYPE_W);
if ( cpu_has_vmx_pat && paging_mode_hap(d) )
vmx_disable_intercept_for_msr(v, MSR_IA32_CR_PAT, MSR_TYPE_R |
MSR_TYPE_W);
- if ( cpu_has_vmx_apic_reg_virt )
- {
- int msr;
- for (msr = MSR_IA32_APICBASE_MSR; msr <= MSR_IA32_APICBASE_MSR +
0xff; msr++)
- vmx_disable_intercept_for_msr(v, msr, MSR_TYPE_R);
- }
- if ( cpu_has_vmx_virtual_intr_delivery )
- {
- vmx_disable_intercept_for_msr(v, MSR_IA32_APICTPR_MSR, MSR_TYPE_W);
- vmx_disable_intercept_for_msr(v, MSR_IA32_APICEOI_MSR, MSR_TYPE_W);
- vmx_disable_intercept_for_msr(v, MSR_IA32_APICSELF_MSR,
MSR_TYPE_W);
- }
}
/* I/O access bitmap. */
diff --git a/xen/arch/x86/hvm/vmx/vmx.c b/xen/arch/x86/hvm/vmx/vmx.c
index 362273b..855a003 100644
--- a/xen/arch/x86/hvm/vmx/vmx.c
+++ b/xen/arch/x86/hvm/vmx/vmx.c
@@ -1887,18 +1887,61 @@ static void vmx_install_vlapic_mapping(struct vcpu *v)
void vmx_vlapic_msr_changed(struct vcpu *v)
{
+ int virtualize_x2apic_mode;
struct vlapic *vlapic = vcpu_vlapic(v);
- if ( !cpu_has_vmx_virtualize_apic_accesses )
+ virtualize_x2apic_mode = ( (cpu_has_vmx_apic_reg_virt ||
+ cpu_has_vmx_virtual_intr_delivery) &&
+ cpu_has_vmx_virtualize_x2apic_mode );
+
+ if ( !cpu_has_vmx_virtualize_apic_accesses &&
+ !virtualize_x2apic_mode )
return;
vmx_vmcs_enter(v);
v->arch.hvm_vmx.secondary_exec_control &=
- ~SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES;
+ ~(SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |
+ SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE);
if ( !vlapic_hw_disabled(vlapic) &&
(vlapic_base_address(vlapic) == APIC_DEFAULT_PHYS_BASE) )
- v->arch.hvm_vmx.secondary_exec_control |=
- SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES;
+ {
+ if ( virtualize_x2apic_mode && vlapic_x2apic_mode(vlapic) )
+ {
+ v->arch.hvm_vmx.secondary_exec_control |=
+ SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE;
+ if ( cpu_has_vmx_apic_reg_virt )
+ {
+ for (int msr = MSR_IA32_APICBASE_MSR;
+ msr <= MSR_IA32_APICBASE_MSR + 0xff; msr++)
+ vmx_disable_intercept_for_msr(v, msr, MSR_TYPE_R);
+
+ vmx_enable_intercept_for_msr(v, MSR_IA32_APICPPR_MSR,
+ MSR_TYPE_R);
+ vmx_enable_intercept_for_msr(v, MSR_IA32_APICTMICT_MSR,
+ MSR_TYPE_R);
+ vmx_enable_intercept_for_msr(v, MSR_IA32_APICTMCCT_MSR,
+ MSR_TYPE_R);
+ }
+ if ( cpu_has_vmx_virtual_intr_delivery )
+ {
+ vmx_disable_intercept_for_msr(v, MSR_IA32_APICTPR_MSR,
+ MSR_TYPE_W);
+ vmx_disable_intercept_for_msr(v, MSR_IA32_APICEOI_MSR,
+ MSR_TYPE_W);
+ vmx_disable_intercept_for_msr(v, MSR_IA32_APICSELF_MSR,
+ MSR_TYPE_W);
+ }
+ }
+ else
+ {
+ v->arch.hvm_vmx.secondary_exec_control |=
+ SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES;
+ for (int msr = MSR_IA32_APICBASE_MSR;
+ msr <= MSR_IA32_APICBASE_MSR + 0xff; msr++)
+ vmx_enable_intercept_for_msr(v, msr,
+ MSR_TYPE_R | MSR_TYPE_W);
+ }
+ }
vmx_update_secondary_exec_control(v);
vmx_vmcs_exit(v);
}
diff --git a/xen/include/asm-x86/hvm/vmx/vmcs.h
b/xen/include/asm-x86/hvm/vmx/vmcs.h
index d4958c3..25c94a6 100644
--- a/xen/include/asm-x86/hvm/vmx/vmcs.h
+++ b/xen/include/asm-x86/hvm/vmx/vmcs.h
@@ -184,6 +184,7 @@ extern u32 vmx_vmentry_control;
#define SECONDARY_EXEC_ENABLE_EPT 0x00000002
#define SECONDARY_EXEC_DESCRIPTOR_TABLE_EXITING 0x00000004
#define SECONDARY_EXEC_ENABLE_RDTSCP 0x00000008
+#define SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE 0x00000010
#define SECONDARY_EXEC_ENABLE_VPID 0x00000020
#define SECONDARY_EXEC_WBINVD_EXITING 0x00000040
#define SECONDARY_EXEC_UNRESTRICTED_GUEST 0x00000080
@@ -244,6 +245,8 @@ extern bool_t cpu_has_vmx_ins_outs_instr_info;
(vmx_secondary_exec_control & SECONDARY_EXEC_APIC_REGISTER_VIRT)
#define cpu_has_vmx_virtual_intr_delivery \
(vmx_secondary_exec_control & SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY)
+#define cpu_has_vmx_virtualize_x2apic_mode \
+ (vmx_secondary_exec_control & SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE)
/* GUEST_INTERRUPTIBILITY_INFO flags. */
#define VMX_INTR_SHADOW_STI 0x00000001
@@ -434,6 +437,7 @@ enum vmcs_field {
#define MSR_TYPE_R 1
#define MSR_TYPE_W 2
void vmx_disable_intercept_for_msr(struct vcpu *v, u32 msr, int type);
+void vmx_enable_intercept_for_msr(struct vcpu *v, u32 msr, int type);
int vmx_read_guest_msr(u32 msr, u64 *val);
int vmx_write_guest_msr(u32 msr, u64 val);
int vmx_add_guest_msr(u32 msr);
diff --git a/xen/include/asm-x86/msr-index.h b/xen/include/asm-x86/msr-index.h
index 5c1de6e..f500efd 100644
--- a/xen/include/asm-x86/msr-index.h
+++ b/xen/include/asm-x86/msr-index.h
@@ -300,7 +300,10 @@
#define MSR_IA32_APICBASE_BASE (0xfffff<<12)
#define MSR_IA32_APICBASE_MSR 0x800
#define MSR_IA32_APICTPR_MSR 0x808
+#define MSR_IA32_APICPPR_MSR 0x80a
#define MSR_IA32_APICEOI_MSR 0x80b
+#define MSR_IA32_APICTMICT_MSR 0x838
+#define MSR_IA32_APICTMCCT_MSR 0x839
#define MSR_IA32_APICSELF_MSR 0x83f
#define MSR_IA32_UCODE_WRITE 0x00000079
--
1.7.1
Dong, Eddie
2013-Jan-31 15:07 UTC
Re: [PATCH v5 2/2] Xen: Fix VMCS setting for x2APIC mode guest while enabling APICV
Acked-by: Eddie Dong <eddie.dong@intel.com>> -----Original Message----- > From: xen-devel-bounces@lists.xen.org > [mailto:xen-devel-bounces@lists.xen.org] On Behalf Of Li, Jiongxi > Sent: Wednesday, January 30, 2013 11:24 AM > To: ''xen-devel@lists.xen.org''; Jan Beulich > Cc: Keir Fraser > Subject: [Xen-devel] [PATCH v5 2/2] Xen: Fix VMCS setting for x2APIC mode > guest while enabling APICV > > The "APIC-register virtualization" and "virtual-interrupt deliver" > VM-execution control has no effect on the behavior of RDMSR/WRMSR if > the "virtualize x2APIC mode" VM-execution control is 0. > When guest uses x2APIC mode, we should enable "virtualize x2APIC mode" > for APICV first. > > Signed-off-by: Jiongxi Li <jiongxi.li@intel.com> > > diff --git a/xen/arch/x86/hvm/vmx/vmcs.c b/xen/arch/x86/hvm/vmx/vmcs.c > index de22e03..8abdf6d 100644 > --- a/xen/arch/x86/hvm/vmx/vmcs.c > +++ b/xen/arch/x86/hvm/vmx/vmcs.c > @@ -190,7 +190,8 @@ static int vmx_init_vmcs_config(void) > */ > if ( _vmx_cpu_based_exec_control & > CPU_BASED_TPR_SHADOW ) > opt |= SECONDARY_EXEC_APIC_REGISTER_VIRT | > - SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY; > + SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY | > + SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE; > > > _vmx_secondary_exec_control = adjust_vmx_controls( > @@ -658,19 +659,60 @@ void vmx_disable_intercept_for_msr(struct vcpu > *v, u32 msr, int type) > */ > if ( msr <= 0x1fff ) > { > - if (type & MSR_TYPE_R) > - __clear_bit(msr, msr_bitmap + 0x000/BYTES_PER_LONG); /* > read-low */ > - if (type & MSR_TYPE_W) > - __clear_bit(msr, msr_bitmap + 0x800/BYTES_PER_LONG); /* > write-low */ > + if ( type & MSR_TYPE_R ) > + clear_bit(msr, msr_bitmap + 0x000/BYTES_PER_LONG); /* > read-low */ > + if ( type & MSR_TYPE_W ) > + clear_bit(msr, msr_bitmap + 0x800/BYTES_PER_LONG); /* > write-low */ > } > else if ( (msr >= 0xc0000000) && (msr <= 0xc0001fff) ) > { > msr &= 0x1fff; > - if (type & MSR_TYPE_R) > - __clear_bit(msr, msr_bitmap + 0x400/BYTES_PER_LONG); /* > read-high */ > - 
if (type & MSR_TYPE_W) > - __clear_bit(msr, msr_bitmap + 0xc00/BYTES_PER_LONG); /* > write-high */ > + if ( type & MSR_TYPE_R ) > + clear_bit(msr, msr_bitmap + 0x400/BYTES_PER_LONG); /* > read-high */ > + if ( type & MSR_TYPE_W ) > + clear_bit(msr, msr_bitmap + 0xc00/BYTES_PER_LONG); /* > write-high */ > } > + else > + HVM_DBG_LOG(DBG_LEVEL_0, > + "msr %x is out of the control range" > + "0x00000000-0x00001fff and > 0xc0000000-0xc0001fff" > + "RDMSR or WRMSR will cause a VM exit", msr); > + > +} > + > +void vmx_enable_intercept_for_msr(struct vcpu *v, u32 msr, int type) > +{ > + unsigned long *msr_bitmap = v->arch.hvm_vmx.msr_bitmap; > + > + /* VMX MSR bitmap supported? */ > + if ( msr_bitmap == NULL ) > + return; > + > + /* > + * See Intel PRM Vol. 3, 20.6.9 (MSR-Bitmap Address). Early manuals > + * have the write-low and read-high bitmap offsets the wrong way > round. > + * We can control MSRs 0x00000000-0x00001fff and > 0xc0000000-0xc0001fff. > + */ > + if ( msr <= 0x1fff ) > + { > + if ( type & MSR_TYPE_R ) > + set_bit(msr, msr_bitmap + 0x000/BYTES_PER_LONG); /* > read-low */ > + if ( type & MSR_TYPE_W ) > + set_bit(msr, msr_bitmap + 0x800/BYTES_PER_LONG); /* > write-low */ > + } > + else if ( (msr >= 0xc0000000) && (msr <= 0xc0001fff) ) > + { > + msr &= 0x1fff; > + if ( type & MSR_TYPE_R ) > + set_bit(msr, msr_bitmap + 0x400/BYTES_PER_LONG); /* > read-high */ > + if ( type & MSR_TYPE_W ) > + set_bit(msr, msr_bitmap + 0xc00/BYTES_PER_LONG); /* > write-high */ > + } > + else > + HVM_DBG_LOG(DBG_LEVEL_0, > + "msr %x is out of the control range" > + "0x00000000-0x00001fff and > 0xc0000000-0xc0001fff" > + "RDMSR or WRMSR will cause a VM exit", msr); > } > > /* > @@ -764,6 +806,9 @@ static int construct_vmcs(struct vcpu *v) > vmentry_ctl &= ~VM_ENTRY_LOAD_GUEST_PAT; > } > > + /* Disable Virtualize x2APIC mode by default. 
*/ > + v->arch.hvm_vmx.secondary_exec_control &> ~SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE; > + > /* Do not enable Monitor Trap Flag unless start single step debug */ > v->arch.hvm_vmx.exec_control &> ~CPU_BASED_MONITOR_TRAP_FLAG; > > @@ -800,18 +845,6 @@ static int construct_vmcs(struct vcpu *v) > vmx_disable_intercept_for_msr(v, MSR_IA32_SYSENTER_EIP, > MSR_TYPE_R | MSR_TYPE_W); > if ( cpu_has_vmx_pat && paging_mode_hap(d) ) > vmx_disable_intercept_for_msr(v, MSR_IA32_CR_PAT, > MSR_TYPE_R | MSR_TYPE_W); > - if ( cpu_has_vmx_apic_reg_virt ) > - { > - int msr; > - for (msr = MSR_IA32_APICBASE_MSR; msr <> MSR_IA32_APICBASE_MSR + 0xff; msr++) > - vmx_disable_intercept_for_msr(v, msr, MSR_TYPE_R); > - } > - if ( cpu_has_vmx_virtual_intr_delivery ) > - { > - vmx_disable_intercept_for_msr(v, MSR_IA32_APICTPR_MSR, > MSR_TYPE_W); > - vmx_disable_intercept_for_msr(v, MSR_IA32_APICEOI_MSR, > MSR_TYPE_W); > - vmx_disable_intercept_for_msr(v, MSR_IA32_APICSELF_MSR, > MSR_TYPE_W); > - } > } > > /* I/O access bitmap. 
*/ > diff --git a/xen/arch/x86/hvm/vmx/vmx.c b/xen/arch/x86/hvm/vmx/vmx.c > index 362273b..855a003 100644 > --- a/xen/arch/x86/hvm/vmx/vmx.c > +++ b/xen/arch/x86/hvm/vmx/vmx.c > @@ -1887,18 +1887,61 @@ static void vmx_install_vlapic_mapping(struct > vcpu *v) > > void vmx_vlapic_msr_changed(struct vcpu *v) > { > + int virtualize_x2apic_mode; > struct vlapic *vlapic = vcpu_vlapic(v); > > - if ( !cpu_has_vmx_virtualize_apic_accesses ) > + virtualize_x2apic_mode = ( (cpu_has_vmx_apic_reg_virt || > + cpu_has_vmx_virtual_intr_delivery) > && > + > cpu_has_vmx_virtualize_x2apic_mode ); > + > + if ( !cpu_has_vmx_virtualize_apic_accesses && > + !virtualize_x2apic_mode ) > return; > > vmx_vmcs_enter(v); > v->arch.hvm_vmx.secondary_exec_control &> - ~SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES; > + ~(SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES | > + SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE); > if ( !vlapic_hw_disabled(vlapic) && > (vlapic_base_address(vlapic) == APIC_DEFAULT_PHYS_BASE) ) > - v->arch.hvm_vmx.secondary_exec_control |> - SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES; > + { > + if ( virtualize_x2apic_mode && vlapic_x2apic_mode(vlapic) ) > + { > + v->arch.hvm_vmx.secondary_exec_control |> + SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE; > + if ( cpu_has_vmx_apic_reg_virt ) > + { > + for (int msr = MSR_IA32_APICBASE_MSR; > + msr <= MSR_IA32_APICBASE_MSR + 0xff; msr++) > + vmx_disable_intercept_for_msr(v, msr, > MSR_TYPE_R); > + > + vmx_enable_intercept_for_msr(v, > MSR_IA32_APICPPR_MSR, > + MSR_TYPE_R); > + vmx_enable_intercept_for_msr(v, > MSR_IA32_APICTMICT_MSR, > + MSR_TYPE_R); > + vmx_enable_intercept_for_msr(v, > MSR_IA32_APICTMCCT_MSR, > + MSR_TYPE_R); > + } > + if ( cpu_has_vmx_virtual_intr_delivery ) > + { > + vmx_disable_intercept_for_msr(v, > MSR_IA32_APICTPR_MSR, > + MSR_TYPE_W); > + vmx_disable_intercept_for_msr(v, > MSR_IA32_APICEOI_MSR, > + MSR_TYPE_W); > + vmx_disable_intercept_for_msr(v, > MSR_IA32_APICSELF_MSR, > + MSR_TYPE_W); > + } > + } > + else > + { > + 
v->arch.hvm_vmx.secondary_exec_control |> + SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES; > + for (int msr = MSR_IA32_APICBASE_MSR; > + msr <= MSR_IA32_APICBASE_MSR + 0xff; msr++) > + vmx_enable_intercept_for_msr(v, msr, > + MSR_TYPE_R | > MSR_TYPE_W); > + } > + } > vmx_update_secondary_exec_control(v); > vmx_vmcs_exit(v); > } > diff --git a/xen/include/asm-x86/hvm/vmx/vmcs.h > b/xen/include/asm-x86/hvm/vmx/vmcs.h > index d4958c3..25c94a6 100644 > --- a/xen/include/asm-x86/hvm/vmx/vmcs.h > +++ b/xen/include/asm-x86/hvm/vmx/vmcs.h > @@ -184,6 +184,7 @@ extern u32 vmx_vmentry_control; > #define SECONDARY_EXEC_ENABLE_EPT 0x00000002 > #define SECONDARY_EXEC_DESCRIPTOR_TABLE_EXITING 0x00000004 > #define SECONDARY_EXEC_ENABLE_RDTSCP 0x00000008 > +#define SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE 0x00000010 > #define SECONDARY_EXEC_ENABLE_VPID 0x00000020 > #define SECONDARY_EXEC_WBINVD_EXITING 0x00000040 > #define SECONDARY_EXEC_UNRESTRICTED_GUEST 0x00000080 > @@ -244,6 +245,8 @@ extern bool_t cpu_has_vmx_ins_outs_instr_info; > (vmx_secondary_exec_control & > SECONDARY_EXEC_APIC_REGISTER_VIRT) > #define cpu_has_vmx_virtual_intr_delivery \ > (vmx_secondary_exec_control & > SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY) > +#define cpu_has_vmx_virtualize_x2apic_mode \ > + (vmx_secondary_exec_control & > SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE) > > /* GUEST_INTERRUPTIBILITY_INFO flags. 
*/ > #define VMX_INTR_SHADOW_STI 0x00000001 > @@ -434,6 +437,7 @@ enum vmcs_field { > #define MSR_TYPE_R 1 > #define MSR_TYPE_W 2 > void vmx_disable_intercept_for_msr(struct vcpu *v, u32 msr, int type); > +void vmx_enable_intercept_for_msr(struct vcpu *v, u32 msr, int type); > int vmx_read_guest_msr(u32 msr, u64 *val); > int vmx_write_guest_msr(u32 msr, u64 val); > int vmx_add_guest_msr(u32 msr); > diff --git a/xen/include/asm-x86/msr-index.h > b/xen/include/asm-x86/msr-index.h > index 5c1de6e..f500efd 100644 > --- a/xen/include/asm-x86/msr-index.h > +++ b/xen/include/asm-x86/msr-index.h > @@ -300,7 +300,10 @@ > #define MSR_IA32_APICBASE_BASE (0xfffff<<12) > #define MSR_IA32_APICBASE_MSR 0x800 > #define MSR_IA32_APICTPR_MSR 0x808 > +#define MSR_IA32_APICPPR_MSR 0x80a > #define MSR_IA32_APICEOI_MSR 0x80b > +#define MSR_IA32_APICTMICT_MSR 0x838 > +#define MSR_IA32_APICTMCCT_MSR 0x839 > #define MSR_IA32_APICSELF_MSR 0x83f > > #define MSR_IA32_UCODE_WRITE 0x00000079 > -- > 1.7.1 > > _______________________________________________ > Xen-devel mailing list > Xen-devel@lists.xen.org > http://lists.xen.org/xen-devel