From 95aba3bcd84e5a8ff33b0da4300d6c6c2e35fc80 Mon Sep 17 00:00:00 2001
From: Liu Jinsong <jinsong.liu@intel.com>
Date: Tue, 19 Nov 2013 18:47:44 +0800
Subject: [PATCH 3/5] X86: MPX IA32_BNDCFGS msr handle

Signed-off-by: Xudong Hao <xudong.hao@intel.com>
Reviewed-by: Liu Jinsong <jinsong.liu@intel.com>
---
 xen/arch/x86/hvm/hvm.c             | 8 ++++++++
 xen/arch/x86/hvm/vmx/vmcs.c        | 8 ++++++--
 xen/include/asm-x86/cpufeature.h   | 2 ++
 xen/include/asm-x86/hvm/vcpu.h     | 2 ++
 xen/include/asm-x86/hvm/vmx/vmcs.h | 2 ++
 xen/include/asm-x86/msr-index.h    | 2 ++
 6 files changed, 22 insertions(+), 2 deletions(-)

diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index 3b353ec..416ad92 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -3008,6 +3008,10 @@ int hvm_msr_read_intercept(unsigned int msr, uint64_t *msr_content)
         hvm_get_guest_pat(v, msr_content);
         break;

+    case MSR_IA32_BNDCFGS:
+        *msr_content = v->arch.hvm_vcpu.bndcfgs;
+        break;
+
     case MSR_MTRRcap:
         if ( !mtrr )
             goto gp_fault;
@@ -3131,6 +3135,10 @@ int hvm_msr_write_intercept(unsigned int msr, uint64_t msr_content)
             goto gp_fault;
         break;

+    case MSR_IA32_BNDCFGS:
+        v->arch.hvm_vcpu.bndcfgs = msr_content;
+        break;
+
     case MSR_MTRRcap:
         if ( !mtrr )
             goto gp_fault;
diff --git a/xen/arch/x86/hvm/vmx/vmcs.c b/xen/arch/x86/hvm/vmx/vmcs.c
index 290b42f..4a1f168 100644
--- a/xen/arch/x86/hvm/vmx/vmcs.c
+++ b/xen/arch/x86/hvm/vmx/vmcs.c
@@ -270,7 +270,8 @@ static int vmx_init_vmcs_config(void)
     }

     min = VM_EXIT_ACK_INTR_ON_EXIT;
-    opt = VM_EXIT_SAVE_GUEST_PAT | VM_EXIT_LOAD_HOST_PAT;
+    opt = VM_EXIT_SAVE_GUEST_PAT | VM_EXIT_LOAD_HOST_PAT |
+          VM_EXIT_CLEAR_BNDCFGS;
     min |= VM_EXIT_IA32E_MODE;
     _vmx_vmexit_control = adjust_vmx_controls(
         "VMExit Control", min, opt, MSR_IA32_VMX_EXIT_CTLS, &mismatch);
@@ -284,7 +285,7 @@ static int vmx_init_vmcs_config(void)
         _vmx_pin_based_exec_control &= ~ PIN_BASED_POSTED_INTERRUPT;

     min = 0;
-    opt = VM_ENTRY_LOAD_GUEST_PAT;
+    opt = VM_ENTRY_LOAD_GUEST_PAT | VM_ENTRY_LOAD_BNDCFGS;
     _vmx_vmentry_control = adjust_vmx_controls(
         "VMEntry Control", min, opt, MSR_IA32_VMX_ENTRY_CTLS, &mismatch);

@@ -955,6 +956,9 @@ static int construct_vmcs(struct vcpu *v)
         vmx_disable_intercept_for_msr(v, MSR_IA32_SYSENTER_EIP, MSR_TYPE_R | MSR_TYPE_W);
         if ( paging_mode_hap(d) && (!iommu_enabled || iommu_snoop) )
             vmx_disable_intercept_for_msr(v, MSR_IA32_CR_PAT, MSR_TYPE_R | MSR_TYPE_W);
+        if ( (vmexit_ctl & VM_EXIT_CLEAR_BNDCFGS) &&
+             (vmentry_ctl & VM_ENTRY_LOAD_BNDCFGS) )
+            vmx_disable_intercept_for_msr(v, MSR_IA32_BNDCFGS, MSR_TYPE_R | MSR_TYPE_W);
     }

     /* I/O access bitmap. */
diff --git a/xen/include/asm-x86/cpufeature.h b/xen/include/asm-x86/cpufeature.h
index 1cfaf94..930dc9b 100644
--- a/xen/include/asm-x86/cpufeature.h
+++ b/xen/include/asm-x86/cpufeature.h
@@ -148,6 +148,7 @@
 #define X86_FEATURE_INVPCID     (7*32+10) /* Invalidate Process Context ID */
 #define X86_FEATURE_RTM         (7*32+11) /* Restricted Transactional Memory */
 #define X86_FEATURE_NO_FPU_SEL  (7*32+13) /* FPU CS/DS stored as zero */
+#define X86_FEATURE_MPX         (7*32+14) /* Memory Protection Extensions */
 #define X86_FEATURE_SMAP        (7*32+20) /* Supervisor Mode Access Prevention */

 #define cpu_has(c, bit)         test_bit(bit, (c)->x86_capability)
@@ -197,6 +198,7 @@
 #define cpu_has_xsave           boot_cpu_has(X86_FEATURE_XSAVE)
 #define cpu_has_avx             boot_cpu_has(X86_FEATURE_AVX)
 #define cpu_has_lwp             boot_cpu_has(X86_FEATURE_LWP)
+#define cpu_has_mpx             boot_cpu_has(X86_FEATURE_MPX)

 #define cpu_has_arch_perfmon    boot_cpu_has(X86_FEATURE_ARCH_PERFMON)

diff --git a/xen/include/asm-x86/hvm/vcpu.h b/xen/include/asm-x86/hvm/vcpu.h
index a309389..e11ab72 100644
--- a/xen/include/asm-x86/hvm/vcpu.h
+++ b/xen/include/asm-x86/hvm/vcpu.h
@@ -162,6 +162,8 @@ struct hvm_vcpu {
     struct mtrr_state mtrr;
     u64 pat_cr;

+    u64 bndcfgs;
+
     /* In mode delay_for_missed_ticks, VCPUs have differing guest times. */
     int64_t stime_offset;

diff --git a/xen/include/asm-x86/hvm/vmx/vmcs.h b/xen/include/asm-x86/hvm/vmx/vmcs.h
index ebaba5c..75cd653 100644
--- a/xen/include/asm-x86/hvm/vmx/vmcs.h
+++ b/xen/include/asm-x86/hvm/vmx/vmcs.h
@@ -186,6 +186,7 @@ extern u32 vmx_pin_based_exec_control;
 #define VM_EXIT_SAVE_GUEST_EFER         0x00100000
 #define VM_EXIT_LOAD_HOST_EFER          0x00200000
 #define VM_EXIT_SAVE_PREEMPT_TIMER      0x00400000
+#define VM_EXIT_CLEAR_BNDCFGS           0x00800000
 extern u32 vmx_vmexit_control;

 #define VM_ENTRY_IA32E_MODE             0x00000200
@@ -194,6 +195,7 @@ extern u32 vmx_vmexit_control;
 #define VM_ENTRY_LOAD_PERF_GLOBAL_CTRL  0x00002000
 #define VM_ENTRY_LOAD_GUEST_PAT         0x00004000
 #define VM_ENTRY_LOAD_GUEST_EFER        0x00008000
+#define VM_ENTRY_LOAD_BNDCFGS           0x00010000
 extern u32 vmx_vmentry_control;

 #define SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES 0x00000001
diff --git a/xen/include/asm-x86/msr-index.h b/xen/include/asm-x86/msr-index.h
index e597a28..fef97a1 100644
--- a/xen/include/asm-x86/msr-index.h
+++ b/xen/include/asm-x86/msr-index.h
@@ -56,6 +56,8 @@
 #define MSR_IA32_DS_AREA                0x00000600
 #define MSR_IA32_PERF_CAPABILITIES      0x00000345

+#define MSR_IA32_BNDCFGS                0x00000D90
+
 #define MSR_MTRRfix64K_00000            0x00000250
 #define MSR_MTRRfix16K_80000            0x00000258
 #define MSR_MTRRfix16K_A0000            0x00000259
-- 
1.7.1
On 19/11/13 10:51, Liu, Jinsong wrote:
> From 95aba3bcd84e5a8ff33b0da4300d6c6c2e35fc80 Mon Sep 17 00:00:00 2001
> From: Liu Jinsong <jinsong.liu@intel.com>
> Date: Tue, 19 Nov 2013 18:47:44 +0800
> Subject: [PATCH 3/5] X86: MPX IA32_BNDCFGS msr handle
>
> Signed-off-by: Xudong Hao <xudong.hao@intel.com>
> Reviewed-by: Liu Jinsong <jinsong.liu@intel.com>
> ---
>  xen/arch/x86/hvm/hvm.c             | 8 ++++++++
>  xen/arch/x86/hvm/vmx/vmcs.c        | 8 ++++++--
>  xen/include/asm-x86/cpufeature.h   | 2 ++
>  xen/include/asm-x86/hvm/vcpu.h     | 2 ++
>  xen/include/asm-x86/hvm/vmx/vmcs.h | 2 ++
>  xen/include/asm-x86/msr-index.h    | 2 ++
>  6 files changed, 22 insertions(+), 2 deletions(-)
>
> diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
> index 3b353ec..416ad92 100644
> --- a/xen/arch/x86/hvm/hvm.c
> +++ b/xen/arch/x86/hvm/hvm.c
> @@ -3008,6 +3008,10 @@ int hvm_msr_read_intercept(unsigned int msr, uint64_t *msr_content)
>          hvm_get_guest_pat(v, msr_content);
>          break;
>
> +    case MSR_IA32_BNDCFGS:
> +        *msr_content = v->arch.hvm_vcpu.bndcfgs;
> +        break;
> +
>      case MSR_MTRRcap:
>          if ( !mtrr )
>              goto gp_fault;
> @@ -3131,6 +3135,10 @@ int hvm_msr_write_intercept(unsigned int msr, uint64_t msr_content)
>              goto gp_fault;
>          break;
>
> +    case MSR_IA32_BNDCFGS:
> +        v->arch.hvm_vcpu.bndcfgs = msr_content;
> +        break;
> +
>      case MSR_MTRRcap:
>          if ( !mtrr )
>              goto gp_fault;
> diff --git a/xen/arch/x86/hvm/vmx/vmcs.c b/xen/arch/x86/hvm/vmx/vmcs.c
> index 290b42f..4a1f168 100644
> --- a/xen/arch/x86/hvm/vmx/vmcs.c
> +++ b/xen/arch/x86/hvm/vmx/vmcs.c
> @@ -270,7 +270,8 @@ static int vmx_init_vmcs_config(void)
>      }
>
>      min = VM_EXIT_ACK_INTR_ON_EXIT;
> -    opt = VM_EXIT_SAVE_GUEST_PAT | VM_EXIT_LOAD_HOST_PAT;
> +    opt = VM_EXIT_SAVE_GUEST_PAT | VM_EXIT_LOAD_HOST_PAT |
> +          VM_EXIT_CLEAR_BNDCFGS;
>      min |= VM_EXIT_IA32E_MODE;
>      _vmx_vmexit_control = adjust_vmx_controls(
>          "VMExit Control", min, opt, MSR_IA32_VMX_EXIT_CTLS, &mismatch);
> @@ -284,7 +285,7 @@ static int vmx_init_vmcs_config(void)
>          _vmx_pin_based_exec_control &= ~ PIN_BASED_POSTED_INTERRUPT;
>
>      min = 0;
> -    opt = VM_ENTRY_LOAD_GUEST_PAT;
> +    opt = VM_ENTRY_LOAD_GUEST_PAT | VM_ENTRY_LOAD_BNDCFGS;
>      _vmx_vmentry_control = adjust_vmx_controls(
>          "VMEntry Control", min, opt, MSR_IA32_VMX_ENTRY_CTLS, &mismatch);
>
> @@ -955,6 +956,9 @@ static int construct_vmcs(struct vcpu *v)
>          vmx_disable_intercept_for_msr(v, MSR_IA32_SYSENTER_EIP, MSR_TYPE_R | MSR_TYPE_W);
>          if ( paging_mode_hap(d) && (!iommu_enabled || iommu_snoop) )
>              vmx_disable_intercept_for_msr(v, MSR_IA32_CR_PAT, MSR_TYPE_R | MSR_TYPE_W);
> +        if ( (vmexit_ctl & VM_EXIT_CLEAR_BNDCFGS) &&
> +             (vmentry_ctl & VM_ENTRY_LOAD_BNDCFGS) )
> +            vmx_disable_intercept_for_msr(v, MSR_IA32_BNDCFGS, MSR_TYPE_R | MSR_TYPE_W);

So if vmentry/exit supports loading/clearing BNDCFGS, we don't intercept
the MSRs.

Are they stored in the VMCS in this case?

In the case that we intercept the MSRs, how and where do they get
saved/restored on context switch?

~Andrew

>      }
>
>      /* I/O access bitmap. */
> diff --git a/xen/include/asm-x86/cpufeature.h b/xen/include/asm-x86/cpufeature.h
> index 1cfaf94..930dc9b 100644
> --- a/xen/include/asm-x86/cpufeature.h
> +++ b/xen/include/asm-x86/cpufeature.h
> @@ -148,6 +148,7 @@
>  #define X86_FEATURE_INVPCID     (7*32+10) /* Invalidate Process Context ID */
>  #define X86_FEATURE_RTM         (7*32+11) /* Restricted Transactional Memory */
>  #define X86_FEATURE_NO_FPU_SEL  (7*32+13) /* FPU CS/DS stored as zero */
> +#define X86_FEATURE_MPX         (7*32+14) /* Memory Protection Extensions */
>  #define X86_FEATURE_SMAP        (7*32+20) /* Supervisor Mode Access Prevention */
>
>  #define cpu_has(c, bit)         test_bit(bit, (c)->x86_capability)
> @@ -197,6 +198,7 @@
>  #define cpu_has_xsave           boot_cpu_has(X86_FEATURE_XSAVE)
>  #define cpu_has_avx             boot_cpu_has(X86_FEATURE_AVX)
>  #define cpu_has_lwp             boot_cpu_has(X86_FEATURE_LWP)
> +#define cpu_has_mpx             boot_cpu_has(X86_FEATURE_MPX)
>
>  #define cpu_has_arch_perfmon    boot_cpu_has(X86_FEATURE_ARCH_PERFMON)
>
> diff --git a/xen/include/asm-x86/hvm/vcpu.h b/xen/include/asm-x86/hvm/vcpu.h
> index a309389..e11ab72 100644
> --- a/xen/include/asm-x86/hvm/vcpu.h
> +++ b/xen/include/asm-x86/hvm/vcpu.h
> @@ -162,6 +162,8 @@ struct hvm_vcpu {
>      struct mtrr_state mtrr;
>      u64 pat_cr;
>
> +    u64 bndcfgs;
> +
>      /* In mode delay_for_missed_ticks, VCPUs have differing guest times. */
>      int64_t stime_offset;
>
> diff --git a/xen/include/asm-x86/hvm/vmx/vmcs.h b/xen/include/asm-x86/hvm/vmx/vmcs.h
> index ebaba5c..75cd653 100644
> --- a/xen/include/asm-x86/hvm/vmx/vmcs.h
> +++ b/xen/include/asm-x86/hvm/vmx/vmcs.h
> @@ -186,6 +186,7 @@ extern u32 vmx_pin_based_exec_control;
>  #define VM_EXIT_SAVE_GUEST_EFER         0x00100000
>  #define VM_EXIT_LOAD_HOST_EFER          0x00200000
>  #define VM_EXIT_SAVE_PREEMPT_TIMER      0x00400000
> +#define VM_EXIT_CLEAR_BNDCFGS           0x00800000
>  extern u32 vmx_vmexit_control;
>
>  #define VM_ENTRY_IA32E_MODE             0x00000200
> @@ -194,6 +195,7 @@ extern u32 vmx_vmexit_control;
>  #define VM_ENTRY_LOAD_PERF_GLOBAL_CTRL  0x00002000
>  #define VM_ENTRY_LOAD_GUEST_PAT         0x00004000
>  #define VM_ENTRY_LOAD_GUEST_EFER        0x00008000
> +#define VM_ENTRY_LOAD_BNDCFGS           0x00010000
>  extern u32 vmx_vmentry_control;
>
>  #define SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES 0x00000001
> diff --git a/xen/include/asm-x86/msr-index.h b/xen/include/asm-x86/msr-index.h
> index e597a28..fef97a1 100644
> --- a/xen/include/asm-x86/msr-index.h
> +++ b/xen/include/asm-x86/msr-index.h
> @@ -56,6 +56,8 @@
>  #define MSR_IA32_DS_AREA                0x00000600
>  #define MSR_IA32_PERF_CAPABILITIES      0x00000345
>
> +#define MSR_IA32_BNDCFGS                0x00000D90
> +
>  #define MSR_MTRRfix64K_00000            0x00000250
>  #define MSR_MTRRfix16K_80000            0x00000258
>  #define MSR_MTRRfix16K_A0000            0x00000259
>>> On 21.11.13 at 16:30, Andrew Cooper <andrew.cooper3@citrix.com> wrote:
> On 19/11/13 10:51, Liu, Jinsong wrote:
>> @@ -955,6 +956,9 @@ static int construct_vmcs(struct vcpu *v)
>>          vmx_disable_intercept_for_msr(v, MSR_IA32_SYSENTER_EIP, MSR_TYPE_R | MSR_TYPE_W);
>>      if ( paging_mode_hap(d) && (!iommu_enabled || iommu_snoop) )
>>          vmx_disable_intercept_for_msr(v, MSR_IA32_CR_PAT, MSR_TYPE_R | MSR_TYPE_W);
>> +    if ( (vmexit_ctl & VM_EXIT_CLEAR_BNDCFGS) &&
>> +         (vmentry_ctl & VM_ENTRY_LOAD_BNDCFGS) )
>> +        vmx_disable_intercept_for_msr(v, MSR_IA32_BNDCFGS, MSR_TYPE_R | MSR_TYPE_W);
>
> So if vmentry/exit supports loading/clearing BNDCFGS, we don't intercept
> the MSRs.
>
> Are they stored in the VMCS in this case?
>
> In the case that we intercept the MSRs, how and where do they get
> saved/restored on context switch?

Yeah, the code is clearly missing a vmx_add_guest_msr() or some such.

Jan
Jan Beulich wrote:
>>>> On 21.11.13 at 16:30, Andrew Cooper <andrew.cooper3@citrix.com>
>>>> wrote:
>> On 19/11/13 10:51, Liu, Jinsong wrote:
>>> @@ -955,6 +956,9 @@ static int construct_vmcs(struct vcpu *v)
>>> vmx_disable_intercept_for_msr(v, MSR_IA32_SYSENTER_EIP,
>>> MSR_TYPE_R | MSR_TYPE_W); if ( paging_mode_hap(d) &&
>>> (!iommu_enabled || iommu_snoop) )
>>> vmx_disable_intercept_for_msr(v, MSR_IA32_CR_PAT, MSR_TYPE_R |
>>> MSR_TYPE_W); + if ( (vmexit_ctl & VM_EXIT_CLEAR_BNDCFGS) &&
>>> + (vmentry_ctl & VM_ENTRY_LOAD_BNDCFGS) ) +
>>> vmx_disable_intercept_for_msr(v, MSR_IA32_BNDCFGS, MSR_TYPE_R |
>>> MSR_TYPE_W);
>>
>> So if vmentry/exit supports loading/clearing BNDCFGS, we don't
>> intercept the MSRs.
>>
>> Are they stored in the VMCS in this case?
>>
>> In the case that we intercept the MSRs, how and where do they get
>> saved/restored on context switch?
>
> Yeah, the code is clearly missing a vmx_add_guest_msr() or some
> such.
>

Thanks! and we also need vmx_add_host_load_msr() and implicitly clear host
MSR_IA32_BNDCFGS, right?
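
Something like the below, perhaps. This is only a rough, untested sketch of
the fallback for when the BNDCFGS entry/exit controls are not both available.
It assumes the existing current-vCPU-based vmx_add_guest_msr() /
vmx_add_host_load_msr() helpers (so it would have to run in the context of
the vCPU being set up), and that vmx_add_host_load_msr() snapshots the
host value current at the time it is called:

    if ( !((vmexit_ctl & VM_EXIT_CLEAR_BNDCFGS) &&
           (vmentry_ctl & VM_ENTRY_LOAD_BNDCFGS)) )
    {
        /*
         * Have the automatic MSR save/load areas carry the guest's
         * BNDCFGS across VM exits and entries (error handling omitted
         * in this sketch).
         */
        vmx_add_guest_msr(MSR_IA32_BNDCFGS);

        /* Clear the host value and have that 0 reloaded on every VM exit. */
        wrmsrl(MSR_IA32_BNDCFGS, 0);
        vmx_add_host_load_msr(MSR_IA32_BNDCFGS);
    }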
>>> On 22.11.13 at 17:33, "Liu, Jinsong" <jinsong.liu@intel.com> wrote:
> Jan Beulich wrote:
>>>>> On 21.11.13 at 16:30, Andrew Cooper <andrew.cooper3@citrix.com>
>>>>> wrote:
>>> On 19/11/13 10:51, Liu, Jinsong wrote:
>>>> @@ -955,6 +956,9 @@ static int construct_vmcs(struct vcpu *v)
>>>> vmx_disable_intercept_for_msr(v, MSR_IA32_SYSENTER_EIP,
>>>> MSR_TYPE_R | MSR_TYPE_W); if ( paging_mode_hap(d) &&
>>>> (!iommu_enabled || iommu_snoop) )
>>>> vmx_disable_intercept_for_msr(v, MSR_IA32_CR_PAT, MSR_TYPE_R |
>>>> MSR_TYPE_W); + if ( (vmexit_ctl & VM_EXIT_CLEAR_BNDCFGS) &&
>>>> + (vmentry_ctl & VM_ENTRY_LOAD_BNDCFGS) ) +
>>>> vmx_disable_intercept_for_msr(v, MSR_IA32_BNDCFGS, MSR_TYPE_R |
>>>> MSR_TYPE_W);
>>>
>>> So if vmentry/exit supports loading/clearing BNDCFGS, we don't
>>> intercept the MSRs.
>>>
>>> Are they stored in the VMCS in this case?

Please also explicitly address this question Andrew had raised. I
suppose we could go hunt for the information in the spec, but I'm
sure you know the answer without needing to waste much time.

>>> In the case that we intercept the MSRs, how and where do they get
>>> saved/restored on context switch?
>>
>> Yeah, the code is clearly missing a vmx_add_guest_msr() or some
>> such.
>
> Thanks! and we also need vmx_add_host_load_msr() and implicitly clear host
> MSR_IA32_BNDCFGS, right?

Yes, I think so.

Jan
Jan Beulich wrote:
>>>> On 22.11.13 at 17:33, "Liu, Jinsong" <jinsong.liu@intel.com> wrote:
>> Jan Beulich wrote:
>>>>>> On 21.11.13 at 16:30, Andrew Cooper <andrew.cooper3@citrix.com>
>>>>>> wrote:
>>>> On 19/11/13 10:51, Liu, Jinsong wrote:
>>>>> @@ -955,6 +956,9 @@ static int construct_vmcs(struct vcpu *v)
>>>>> vmx_disable_intercept_for_msr(v, MSR_IA32_SYSENTER_EIP,
>>>>> MSR_TYPE_R | MSR_TYPE_W); if ( paging_mode_hap(d) &&
>>>>> (!iommu_enabled || iommu_snoop) )
>>>>> vmx_disable_intercept_for_msr(v, MSR_IA32_CR_PAT, MSR_TYPE_R |
>>>>> MSR_TYPE_W); + if ( (vmexit_ctl & VM_EXIT_CLEAR_BNDCFGS) &&
>>>>> + (vmentry_ctl & VM_ENTRY_LOAD_BNDCFGS) ) +
>>>>> vmx_disable_intercept_for_msr(v, MSR_IA32_BNDCFGS, MSR_TYPE_R |
>>>>> MSR_TYPE_W);
>>>>
>>>> So if vmentry/exit supports loading/clearing BNDCFGS, we don't
>>>> intercept the MSRs.
>>>>
>>>> Are they stored in the VMCS in this case?

Ah, sorry, yes. A new guest-state field for IA32_BNDCFGS is added to the
VMCS. In addition, two new controls are added:
* a VM-exit control called "clear BNDCFGS"
* a VM-entry control called "load BNDCFGS"

Thanks,
Jinsong

>
> Please also explicitly address this question Andrew had raised. I
> suppose we could go hunt for the information in the spec, but I'm
> sure you know the answer without needing to waste much time.
>
>>>> In the case that we intercept the MSRs, how and where do they get
>>>> saved/restored on context switch?
>>>
>>> Yeah, the code is clearly missing a vmx_add_guest_msr() or some
>>> such.
>>
>> Thanks! and we also need vmx_add_host_load_msr() and implicitly
>> clear host MSR_IA32_BNDCFGS, right?
>
> Yes, I think so.
>
> Jan
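
P.S. For illustration only, not part of this patch: with such a guest-state
field available, the VMX-specific MSR intercept handlers could keep the field
itself in sync rather than only the shadow copy in struct hvm_vcpu. The field
encoding below is taken from the SDM; adding GUEST_BNDCFGS to vmcs.h and this
case to vmx_msr_write_intercept() is assumed here, not existing code:

    /* New 64-bit guest-state field encoding (per the SDM): */
    #define GUEST_BNDCFGS 0x00002812

    /* Rough sketch of the write side in vmx_msr_write_intercept(): */
    case MSR_IA32_BNDCFGS:
        vmx_vmcs_enter(v);
        __vmwrite(GUEST_BNDCFGS, msr_content);
        vmx_vmcs_exit(v);
        break;

The read side would mirror this with a __vmread() of the same field. While
the MSR stays intercepted the guest can only change BNDCFGS through this
handler, so the field remains authoritative, and with the "load BNDCFGS"
entry control set it is what VM entry puts back into the MSR.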