From 64cf72388108834db156a4e7ac5af7f3f45c7618 Mon Sep 17 00:00:00 2001 From: Liu Jinsong <jinsong.liu@intel.com> Date: Thu, 28 Nov 2013 09:21:27 +0800 Subject: [PATCH 4/6] X86: MPX IA32_BNDCFGS msr save/restore Signed-off-by: Liu Jinsong <jinsong.liu@intel.com> --- xen/arch/x86/hvm/hvm.c | 42 ++++++++++++++++++++++++++++++++ xen/arch/x86/hvm/vmx/vmx.c | 31 +++++++++++++++++++++++ xen/include/asm-x86/hvm/hvm.h | 3 ++ xen/include/asm-x86/hvm/vmx/vmcs.h | 2 + xen/include/public/arch-x86/hvm/save.h | 8 +++++- 5 files changed, 85 insertions(+), 1 deletions(-) diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c index 0f7178b..4b1dc31 100644 --- a/xen/arch/x86/hvm/hvm.c +++ b/xen/arch/x86/hvm/hvm.c @@ -702,6 +702,48 @@ static int hvm_load_tsc_adjust(struct domain *d, hvm_domain_context_t *h) HVM_REGISTER_SAVE_RESTORE(TSC_ADJUST, hvm_save_tsc_adjust, hvm_load_tsc_adjust, 1, HVMSR_PER_VCPU); +static int hvm_save_msr_bndcfgs(struct domain *d, hvm_domain_context_t *h) +{ + struct vcpu *v; + struct hvm_msr_bndcfgs ctxt = {0}; + int err = 0; + + for_each_vcpu ( d, v ) + { + hvm_funcs.save_msr_bndcfgs(v, &ctxt); + + err = hvm_save_entry(MSR_BNDCFGS, v->vcpu_id, h, &ctxt); + if ( err ) + break; + } + + return err; +} + +static int hvm_load_msr_bndcfgs(struct domain *d, hvm_domain_context_t *h) +{ + unsigned int vcpuid = hvm_load_instance(h); + struct vcpu *v; + struct hvm_msr_bndcfgs ctxt; + + if ( vcpuid >= d->max_vcpus || (v = d->vcpu[vcpuid]) == NULL ) + { + dprintk(XENLOG_G_ERR, "HVM restore: dom%d has no vcpu%u\n", + d->domain_id, vcpuid); + return -EINVAL; + } + + if ( hvm_load_entry(MSR_BNDCFGS, h, &ctxt) != 0 ) + return -EINVAL; + + hvm_funcs.load_msr_bndcfgs(v, &ctxt); + + return 0; +} + +HVM_REGISTER_SAVE_RESTORE(MSR_BNDCFGS, hvm_save_msr_bndcfgs, + hvm_load_msr_bndcfgs, 1, HVMSR_PER_VCPU); + static int hvm_save_cpu_ctxt(struct domain *d, hvm_domain_context_t *h) { struct vcpu *v; diff --git a/xen/arch/x86/hvm/vmx/vmx.c b/xen/arch/x86/hvm/vmx/vmx.c index 
f0132a4..6f24be7 100644 --- a/xen/arch/x86/hvm/vmx/vmx.c +++ b/xen/arch/x86/hvm/vmx/vmx.c @@ -580,6 +580,35 @@ static int vmx_load_vmcs_ctxt(struct vcpu *v, struct hvm_hw_cpu *ctxt) return 0; } +static bool_t vmx_has_mpx(void) +{ + unsigned int eax, ebx, edx, ecx = 0; + + hvm_cpuid(7, &eax, &ebx, &ecx, &edx); + + return !!(ebx & cpufeat_mask(X86_FEATURE_MPX)); +} + +static void vmx_save_msr_bndcfgs(struct vcpu *v, struct hvm_msr_bndcfgs *ctxt) +{ + if ( !vmx_has_mpx() ) + return; + + vmx_vmcs_enter(v); + __vmread(GUEST_BNDCFGS, &ctxt->msr_bndcfgs); + vmx_vmcs_exit(v); +} + +static void vmx_load_msr_bndcfgs(struct vcpu *v, struct hvm_msr_bndcfgs *ctxt) +{ + if ( !vmx_has_mpx() ) + return; + + vmx_vmcs_enter(v); + __vmwrite(GUEST_BNDCFGS, ctxt->msr_bndcfgs); + vmx_vmcs_exit(v); +} + static void vmx_fpu_enter(struct vcpu *v) { vcpu_restore_fpu_lazy(v); @@ -1606,6 +1635,8 @@ static struct hvm_function_table __initdata vmx_function_table = { .vcpu_destroy = vmx_vcpu_destroy, .save_cpu_ctxt = vmx_save_vmcs_ctxt, .load_cpu_ctxt = vmx_load_vmcs_ctxt, + .save_msr_bndcfgs = vmx_save_msr_bndcfgs, + .load_msr_bndcfgs = vmx_load_msr_bndcfgs, .get_interrupt_shadow = vmx_get_interrupt_shadow, .set_interrupt_shadow = vmx_set_interrupt_shadow, .guest_x86_mode = vmx_guest_x86_mode, diff --git a/xen/include/asm-x86/hvm/hvm.h b/xen/include/asm-x86/hvm/hvm.h index a8ba06d..81335af 100644 --- a/xen/include/asm-x86/hvm/hvm.h +++ b/xen/include/asm-x86/hvm/hvm.h @@ -109,6 +109,9 @@ struct hvm_function_table { void (*save_cpu_ctxt)(struct vcpu *v, struct hvm_hw_cpu *ctxt); int (*load_cpu_ctxt)(struct vcpu *v, struct hvm_hw_cpu *ctxt); + void (*save_msr_bndcfgs)(struct vcpu *v, struct hvm_msr_bndcfgs *ctxt); + void (*load_msr_bndcfgs)(struct vcpu *v, struct hvm_msr_bndcfgs *ctxt); + /* Examine specifics of the guest state. 
*/ unsigned int (*get_interrupt_shadow)(struct vcpu *v); void (*set_interrupt_shadow)(struct vcpu *v, unsigned int intr_shadow); diff --git a/xen/include/asm-x86/hvm/vmx/vmcs.h b/xen/include/asm-x86/hvm/vmx/vmcs.h index 75cd653..45dcfa1 100644 --- a/xen/include/asm-x86/hvm/vmx/vmcs.h +++ b/xen/include/asm-x86/hvm/vmx/vmcs.h @@ -367,6 +367,8 @@ enum vmcs_field { GUEST_PDPTR2_HIGH = 0x0000280f, GUEST_PDPTR3 = 0x00002810, GUEST_PDPTR3_HIGH = 0x00002811, + GUEST_BNDCFGS = 0x00002812, + GUEST_BNDCFGS_HIGH = 0x00002813, HOST_PAT = 0x00002c00, HOST_PAT_HIGH = 0x00002c01, HOST_EFER = 0x00002c02, diff --git a/xen/include/public/arch-x86/hvm/save.h b/xen/include/public/arch-x86/hvm/save.h index 3664aaf..0ad4a0c 100644 --- a/xen/include/public/arch-x86/hvm/save.h +++ b/xen/include/public/arch-x86/hvm/save.h @@ -592,9 +592,15 @@ struct hvm_tsc_adjust { DECLARE_HVM_SAVE_TYPE(TSC_ADJUST, 19, struct hvm_tsc_adjust); +struct hvm_msr_bndcfgs { + uint64_t msr_bndcfgs; +}; + +DECLARE_HVM_SAVE_TYPE(MSR_BNDCFGS, 20, struct hvm_msr_bndcfgs); + /* * Largest type-code in use */ -#define HVM_SAVE_CODE_MAX 19 +#define HVM_SAVE_CODE_MAX 20 #endif /* __XEN_PUBLIC_HVM_SAVE_X86_H__ */ -- 1.7.1 _______________________________________________ Xen-devel mailing list Xen-devel@lists.xen.org http://lists.xen.org/xen-devel
>>> On 28.11.13 at 07:36, "Liu, Jinsong" <jinsong.liu@intel.com> wrote:
> --- a/xen/arch/x86/hvm/hvm.c
> +++ b/xen/arch/x86/hvm/hvm.c
> @@ -702,6 +702,48 @@ static int hvm_load_tsc_adjust(struct domain *d, hvm_domain_context_t *h)
>  HVM_REGISTER_SAVE_RESTORE(TSC_ADJUST, hvm_save_tsc_adjust,
>                            hvm_load_tsc_adjust, 1, HVMSR_PER_VCPU);
>
> +static int hvm_save_msr_bndcfgs(struct domain *d, hvm_domain_context_t *h)
> +{
> +    struct vcpu *v;
> +    struct hvm_msr_bndcfgs ctxt = {0};
> +    int err = 0;
> +
> +    for_each_vcpu ( d, v )
> +    {
> +        hvm_funcs.save_msr_bndcfgs(v, &ctxt);
> +
> +        err = hvm_save_entry(MSR_BNDCFGS, v->vcpu_id, h, &ctxt);
> +        if ( err )
> +            break;
> +    }
> +
> +    return err;
> +}
> +
> +static int hvm_load_msr_bndcfgs(struct domain *d, hvm_domain_context_t *h)
> +{
> +    unsigned int vcpuid = hvm_load_instance(h);
> +    struct vcpu *v;
> +    struct hvm_msr_bndcfgs ctxt;
> +
> +    if ( vcpuid >= d->max_vcpus || (v = d->vcpu[vcpuid]) == NULL )
> +    {
> +        dprintk(XENLOG_G_ERR, "HVM restore: dom%d has no vcpu%u\n",
> +                d->domain_id, vcpuid);
> +        return -EINVAL;
> +    }
> +
> +    if ( hvm_load_entry(MSR_BNDCFGS, h, &ctxt) != 0 )
> +        return -EINVAL;
> +
> +    hvm_funcs.load_msr_bndcfgs(v, &ctxt);
> +
> +    return 0;
> +}
> +
> +HVM_REGISTER_SAVE_RESTORE(MSR_BNDCFGS, hvm_save_msr_bndcfgs,
> +                          hvm_load_msr_bndcfgs, 1, HVMSR_PER_VCPU);

That's exactly the non-extensible model I expected you to use,
and that I tried to keep you from following with pointing out that
the vPMU also has at least one MSR not currently saved/restored
properly.

Instead I think we should have a generic MSR save/restore type,
pairing MSR indices and values in an array like fashion. If the
restoring host doesn't know how to handle any one of them, the
restore would fail. Extending it with further MSR values would
then become a much smaller change than the full blown addition
of a new save/restore type.

Jan
Jan Beulich wrote:>>>> On 28.11.13 at 07:36, "Liu, Jinsong" <jinsong.liu@intel.com> wrote: >> --- a/xen/arch/x86/hvm/hvm.c >> +++ b/xen/arch/x86/hvm/hvm.c >> @@ -702,6 +702,48 @@ static int hvm_load_tsc_adjust(struct domain >> *d, hvm_domain_context_t *h) HVM_REGISTER_SAVE_RESTORE(TSC_ADJUST, >> hvm_save_tsc_adjust, hvm_load_tsc_adjust, >> 1, HVMSR_PER_VCPU); >> >> +static int hvm_save_msr_bndcfgs(struct domain *d, >> hvm_domain_context_t *h) +{ + struct vcpu *v; >> + struct hvm_msr_bndcfgs ctxt = {0}; >> + int err = 0; >> + >> + for_each_vcpu ( d, v ) >> + { >> + hvm_funcs.save_msr_bndcfgs(v, &ctxt); >> + >> + err = hvm_save_entry(MSR_BNDCFGS, v->vcpu_id, h, &ctxt); + >> if ( err ) + break; >> + } >> + >> + return err; >> +} >> + >> +static int hvm_load_msr_bndcfgs(struct domain *d, >> hvm_domain_context_t *h) +{ + unsigned int vcpuid >> hvm_load_instance(h); + struct vcpu *v; >> + struct hvm_msr_bndcfgs ctxt; >> + >> + if ( vcpuid >= d->max_vcpus || (v = d->vcpu[vcpuid]) == NULL ) >> + { + dprintk(XENLOG_G_ERR, "HVM restore: dom%d has no >> vcpu%u\n", + d->domain_id, vcpuid); >> + return -EINVAL; >> + } >> + >> + if ( hvm_load_entry(MSR_BNDCFGS, h, &ctxt) != 0 ) + >> return -EINVAL; + >> + hvm_funcs.load_msr_bndcfgs(v, &ctxt); >> + >> + return 0; >> +} >> + >> +HVM_REGISTER_SAVE_RESTORE(MSR_BNDCFGS, hvm_save_msr_bndcfgs, >> + hvm_load_msr_bndcfgs, 1, HVMSR_PER_VCPU); > > That''s exactly the non-extensible model I expected you to use, > and that I tried to keep you from following with pointing out that > the vPMU also has at least one MSR not currently saved/restored > properly. > > Instead I think we should have a generic MSR save/restore type, > pairing MSR indices and values in an array like fashion. If the > restoring host doesn''t know how to handle any one of them, the > restore would fail. Extending it with further MSR values would > then become a much smaller change than the full blown addition > of a new save/restore type. 
>
> Jan

OK, I have implemented a generic MSR save/restore way, w/ MPX MSR.

As for vPMU it's a distinct issue. Currently Xen vPMU does not support
migration because of interrupt issue, and it didn't consider version issue of
architectural PMU which under different version there are different MSRs.

Thanks,
Jinsong
>>> On 02.12.13 at 09:44, "Liu, Jinsong" <jinsong.liu@intel.com> wrote:
> Jan Beulich wrote:
>> Instead I think we should have a generic MSR save/restore type,
>> pairing MSR indices and values in an array like fashion. If the
>> restoring host doesn't know how to handle any one of them, the
>> restore would fail. Extending it with further MSR values would
>> then become a much smaller change than the full blown addition
>> of a new save/restore type.
>
> OK, I have implemented a generic MSR save/restore way, w/ MPX MSR.

Thanks - I'll take a look.

> As for vPMU it's a distinct issue. Currently Xen vPMU does not support
> migration because of interrupt issue, and it didn't consider version issue of
> architectural PMU which under different version there are different MSRs.

Okay, so no immediate need to include that MSR here. But is there
anything done to prevent migration of a domain using the vPMU?

Jan
Jan Beulich wrote:
>>>> On 02.12.13 at 09:44, "Liu, Jinsong" <jinsong.liu@intel.com> wrote:
>> Jan Beulich wrote:
>>> Instead I think we should have a generic MSR save/restore type,
>>> pairing MSR indices and values in an array like fashion. If the
>>> restoring host doesn't know how to handle any one of them, the
>>> restore would fail. Extending it with further MSR values would
>>> then become a much smaller change than the full blown addition
>>> of a new save/restore type.
>>
>> OK, I have implemented a generic MSR save/restore way, w/ MPX MSR.
>
> Thanks - I'll take a look.
>
>> As for vPMU it's a distinct issue. Currently Xen vPMU does not
>> support migration because of interrupt issue, and it didn't consider
>> version issue of architectural PMU which under different version
>> there are different MSRs.
>
> Okay, so no immediate need to include that MSR here. But is there
> anything done to prevent migration of a domain using the vPMU?
>

CC Haitao who is author of Xen vPMU :)

Thanks,
Jinsong