From ca79f76865b7d93292dc8fea280b541b56bfd5a7 Mon Sep 17 00:00:00 2001
From: Liu Jinsong <jinsong.liu@intel.com>
Date: Mon, 2 Dec 2013 04:18:52 +0800
Subject: [PATCH v4 6/7] X86: MSR_IA32_BNDCFGS save/restore
Signed-off-by: Liu Jinsong <jinsong.liu@intel.com>
---
xen/arch/x86/hvm/vmx/vmx.c | 50 +++++++++++++++++++++++++++++++++++-
xen/include/asm-x86/hvm/vmx/vmcs.h | 2 +
2 files changed, 51 insertions(+), 1 deletions(-)
diff --git a/xen/arch/x86/hvm/vmx/vmx.c b/xen/arch/x86/hvm/vmx/vmx.c
index e2d19df..d18b2be 100644
--- a/xen/arch/x86/hvm/vmx/vmx.c
+++ b/xen/arch/x86/hvm/vmx/vmx.c
@@ -580,13 +580,61 @@ static int vmx_load_vmcs_ctxt(struct vcpu *v, struct hvm_hw_cpu *ctxt)
     return 0;
 }
 
-/* Temporarily NULL, could be added in the future */
+static bool_t vmx_has_mpx(void)
+{
+    unsigned int eax, ebx, edx, ecx = 0;
+
+    hvm_cpuid(7, &eax, &ebx, &ecx, &edx);
+
+    return !!(ebx & cpufeat_mask(X86_FEATURE_MPX));
+}
+
+static void vmx_save_msr_bndcfgs(struct vcpu *v, struct hvm_msr *ctxt)
+{
+    if ( !vmx_has_mpx() )
+        return;
+
+    ctxt->msr[ctxt->count].index = MSR_IA32_BNDCFGS;
+    vmx_vmcs_enter(v);
+    __vmread(GUEST_BNDCFGS, &ctxt->msr[ctxt->count++].val);
+    vmx_vmcs_exit(v);
+}
+
+static int vmx_load_msr_bndcfgs(struct vcpu *v, uint64_t val)
+{
+    if ( !vmx_has_mpx() )
+        return -EINVAL;
+
+    vmx_vmcs_enter(v);
+    __vmwrite(GUEST_BNDCFGS, val);
+    vmx_vmcs_exit(v);
+
+    return 0;
+}
+
 static void vmx_save_msr(struct vcpu *v, struct hvm_msr *ctxt)
 {
+    /* save MSR_IA32_BNDCFGS */
+    vmx_save_msr_bndcfgs(v, ctxt);
 }
 
 static int vmx_load_msr(struct vcpu *v, struct hvm_msr *ctxt)
 {
+    int i;
+
+    for ( i = 0; i < ctxt->count; i++ )
+    {
+        switch ( ctxt->msr[i].index )
+        {
+        case MSR_IA32_BNDCFGS:
+            if ( vmx_load_msr_bndcfgs(v, ctxt->msr[i].val) )
+                return -EINVAL;
+            break;
+        default:
+            break;
+        }
+    }
+
     return 0;
 }
 
diff --git a/xen/include/asm-x86/hvm/vmx/vmcs.h b/xen/include/asm-x86/hvm/vmx/vmcs.h
index 75cd653..45dcfa1 100644
--- a/xen/include/asm-x86/hvm/vmx/vmcs.h
+++ b/xen/include/asm-x86/hvm/vmx/vmcs.h
@@ -367,6 +367,8 @@ enum vmcs_field {
     GUEST_PDPTR2_HIGH               = 0x0000280f,
     GUEST_PDPTR3                    = 0x00002810,
     GUEST_PDPTR3_HIGH               = 0x00002811,
+    GUEST_BNDCFGS                   = 0x00002812,
+    GUEST_BNDCFGS_HIGH              = 0x00002813,
     HOST_PAT                        = 0x00002c00,
     HOST_PAT_HIGH                   = 0x00002c01,
     HOST_EFER                       = 0x00002c02,
--
1.7.1
_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xen.org
http://lists.xen.org/xen-devel
>>> On 02.12.13 at 09:54, "Liu, Jinsong" <jinsong.liu@intel.com> wrote:
> --- a/xen/arch/x86/hvm/vmx/vmx.c
> +++ b/xen/arch/x86/hvm/vmx/vmx.c
> @@ -580,13 +580,61 @@ static int vmx_load_vmcs_ctxt(struct vcpu *v, struct hvm_hw_cpu *ctxt)
>      return 0;
>  }
> 
> -/* Temporarily NULL, could be added in the future */
> +static bool_t vmx_has_mpx(void)
> +{
> +    unsigned int eax, ebx, edx, ecx = 0;
> +
> +    hvm_cpuid(7, &eax, &ebx, &ecx, &edx);

hvm_cpuid() accepts NULL now for outputs you don't care about.

> +
> +    return !!(ebx & cpufeat_mask(X86_FEATURE_MPX));
> +}

And then this function clearly isn't VMX-specific. But with the above I
don't think you need a separate function here in the first place.

> +
> +static void vmx_save_msr_bndcfgs(struct vcpu *v, struct hvm_msr *ctxt)
> +{
> +    if ( !vmx_has_mpx() )
> +        return;
> +
> +    ctxt->msr[ctxt->count].index = MSR_IA32_BNDCFGS;
> +    vmx_vmcs_enter(v);
> +    __vmread(GUEST_BNDCFGS, &ctxt->msr[ctxt->count++].val);
> +    vmx_vmcs_exit(v);
> +}
> +
> +static int vmx_load_msr_bndcfgs(struct vcpu *v, uint64_t val)
> +{
> +    if ( !vmx_has_mpx() )
> +        return -EINVAL;
> +
> +    vmx_vmcs_enter(v);
> +    __vmwrite(GUEST_BNDCFGS, val);
> +    vmx_vmcs_exit(v);
> +
> +    return 0;
> +}

This looks inefficient - I'd assume that (almost) all MSRs that VMX
specific code would need to deal with will go into a VMCS field. Hence
entering/exiting the VMCS for each individual MSR seems odd.
And once again, with that addressed there's hardly a reason not to
integrate these into their callers.

> static int vmx_load_msr(struct vcpu *v, struct hvm_msr *ctxt)
> {
> +    int i;
> +
> +    for ( i = 0; i < ctxt->count; i++ )
> +    {
> +        switch ( ctxt->msr[i].index )
> +        {
> +        case MSR_IA32_BNDCFGS:
> +            if ( vmx_load_msr_bndcfgs(v, ctxt->msr[i].val) )
> +                return -EINVAL;
> +            break;
> +        default:
> +            break;
> +        }
> +    }
> +
>     return 0;
> }

This points out a shortcoming of the interface: Generic code ought to
have a way to know which of the incoming records were processed by the
vendor specific code. Then, after processing eventual vendor independent
records, it'll know whether all records got consumed. And if not, the
restore operation _must_ fail.

Jan