From 03cd22f38b4974e3848acd7695aa27fe4bed3c00 Mon Sep 17 00:00:00 2001
From: Liu Jinsong <jinsong.liu@intel.com>
Date: Mon, 2 Dec 2013 01:14:48 +0800
Subject: [PATCH v4 4/7] X86: generic MSRs save/restore

This patch introduces a generic MSR save/restore mechanism, so that
in the future new MSR save/restore support can be added with a smaller
change than the full-blown addition of a new save/restore type.
Suggested-by: Jan Beulich <jbeulich@suse.com>
Signed-off-by: Liu Jinsong <jinsong.liu@intel.com>
---
 xen/arch/x86/hvm/hvm.c                 |   65 ++++++++++++++++++++++++++++++++
 xen/arch/x86/hvm/vmx/vmx.c             |   12 ++++++
 xen/include/asm-x86/hvm/hvm.h          |    3 +
 xen/include/public/arch-x86/hvm/save.h |   15 +++++++-
 4 files changed, 94 insertions(+), 1 deletions(-)
diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index 0f7178b..784cf6a 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -702,6 +702,71 @@ static int hvm_load_tsc_adjust(struct domain *d, hvm_domain_context_t *h)
 HVM_REGISTER_SAVE_RESTORE(TSC_ADJUST, hvm_save_tsc_adjust,
                           hvm_load_tsc_adjust, 1, HVMSR_PER_VCPU);
 
+/* Temporarily empty; common MSR handling can be added in the future */
+static void hvm_save_msr_common(struct vcpu *v, struct hvm_msr *ctxt)
+{
+}
+
+static int hvm_load_msr_common(struct vcpu *v, struct hvm_msr *ctxt)
+{
+    return 0;
+}
+
+static int hvm_save_msr(struct domain *d, hvm_domain_context_t *h)
+{
+    struct vcpu *v;
+    struct hvm_msr ctxt;
+    int err = 0;
+
+    for_each_vcpu ( d, v )
+    {
+        memset(&ctxt, 0, sizeof(ctxt));
+
+        /* For common msrs */
+        hvm_save_msr_common(v, &ctxt);
+
+        /* For vmx/svm specific msrs */
+        if ( hvm_funcs.save_msr )
+            hvm_funcs.save_msr(v, &ctxt);
+
+        err = hvm_save_entry(HVM_MSR, v->vcpu_id, h, &ctxt);
+        if ( err )
+            break;
+    }
+
+    return err;
+}
+
+static int hvm_load_msr(struct domain *d, hvm_domain_context_t *h)
+{
+    unsigned int vcpuid = hvm_load_instance(h);
+    struct vcpu *v;
+    struct hvm_msr ctxt;
+
+    if ( vcpuid >= d->max_vcpus || (v = d->vcpu[vcpuid]) == NULL )
+    {
+        dprintk(XENLOG_G_ERR, "HVM restore: dom%d has no vcpu%u\n",
+                d->domain_id, vcpuid);
+        return -EINVAL;
+    }
+
+    if ( hvm_load_entry(HVM_MSR, h, &ctxt) != 0 )
+        return -EINVAL;
+
+    /* For common msrs */
+    if ( hvm_load_msr_common(v, &ctxt) )
+        return -EINVAL;
+
+    /* For vmx/svm specific msrs */
+    if ( hvm_funcs.load_msr )
+        return hvm_funcs.load_msr(v, &ctxt);
+
+    return 0;
+}
+
+HVM_REGISTER_SAVE_RESTORE(HVM_MSR, hvm_save_msr,
+                          hvm_load_msr, 1, HVMSR_PER_VCPU);
+
 static int hvm_save_cpu_ctxt(struct domain *d, hvm_domain_context_t *h)
 {
     struct vcpu *v;
diff --git a/xen/arch/x86/hvm/vmx/vmx.c b/xen/arch/x86/hvm/vmx/vmx.c
index f0132a4..e2d19df 100644
--- a/xen/arch/x86/hvm/vmx/vmx.c
+++ b/xen/arch/x86/hvm/vmx/vmx.c
@@ -580,6 +580,16 @@ static int vmx_load_vmcs_ctxt(struct vcpu *v, struct hvm_hw_cpu *ctxt)
     return 0;
 }
 
+/* Temporarily empty; VMX-specific MSR handling can be added in the future */
+static void vmx_save_msr(struct vcpu *v, struct hvm_msr *ctxt)
+{
+}
+
+static int vmx_load_msr(struct vcpu *v, struct hvm_msr *ctxt)
+{
+    return 0;
+}
+
 static void vmx_fpu_enter(struct vcpu *v)
 {
     vcpu_restore_fpu_lazy(v);
@@ -1606,6 +1616,8 @@ static struct hvm_function_table __initdata vmx_function_table = {
     .vcpu_destroy         = vmx_vcpu_destroy,
     .save_cpu_ctxt        = vmx_save_vmcs_ctxt,
     .load_cpu_ctxt        = vmx_load_vmcs_ctxt,
+    .save_msr             = vmx_save_msr,
+    .load_msr             = vmx_load_msr,
     .get_interrupt_shadow = vmx_get_interrupt_shadow,
     .set_interrupt_shadow = vmx_set_interrupt_shadow,
     .guest_x86_mode       = vmx_guest_x86_mode,
diff --git a/xen/include/asm-x86/hvm/hvm.h b/xen/include/asm-x86/hvm/hvm.h
index a8ba06d..1c09d41 100644
--- a/xen/include/asm-x86/hvm/hvm.h
+++ b/xen/include/asm-x86/hvm/hvm.h
@@ -109,6 +109,9 @@ struct hvm_function_table {
     void (*save_cpu_ctxt)(struct vcpu *v, struct hvm_hw_cpu *ctxt);
     int (*load_cpu_ctxt)(struct vcpu *v, struct hvm_hw_cpu *ctxt);
 
+    void (*save_msr)(struct vcpu *v, struct hvm_msr *ctxt);
+    int (*load_msr)(struct vcpu *v, struct hvm_msr *ctxt);
+
     /* Examine specifics of the guest state. */
     unsigned int (*get_interrupt_shadow)(struct vcpu *v);
     void (*set_interrupt_shadow)(struct vcpu *v, unsigned int intr_shadow);
diff --git a/xen/include/public/arch-x86/hvm/save.h b/xen/include/public/arch-x86/hvm/save.h
index 3664aaf..bb40b55 100644
--- a/xen/include/public/arch-x86/hvm/save.h
+++ b/xen/include/public/arch-x86/hvm/save.h
@@ -592,9 +592,22 @@ struct hvm_tsc_adjust {
 
 DECLARE_HVM_SAVE_TYPE(TSC_ADJUST, 19, struct hvm_tsc_adjust);
 
+#define MSR_SAVE_LOAD_MAX 16
+struct msr_save_load {
+    uint32_t index;
+    uint64_t val;
+};
+
+struct hvm_msr {
+    unsigned int count;
+    struct msr_save_load msr[MSR_SAVE_LOAD_MAX];
+};
+
+DECLARE_HVM_SAVE_TYPE(HVM_MSR, 20, struct hvm_msr);
+
 /* 
  * Largest type-code in use
  */
-#define HVM_SAVE_CODE_MAX 19
+#define HVM_SAVE_CODE_MAX 20
 
 #endif /* __XEN_PUBLIC_HVM_SAVE_X86_H__ */
-- 
1.7.1
_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xen.org
http://lists.xen.org/xen-devel
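
[Editor's illustration: a minimal sketch of how the empty
hvm_save_msr_common()/hvm_load_msr_common() hooks in the patch above
might be filled in once a first common MSR is added. MSR_EXAMPLE and
v->arch.hvm_vcpu.example_msr are hypothetical placeholders, not real
Xen names, and a real implementation would also need to coordinate
with the vendor-specific hook so unknown vendor MSRs are not rejected
here.]

static void hvm_save_msr_common(struct vcpu *v, struct hvm_msr *ctxt)
{
    /* Append one entry per MSR with guest state worth preserving. */
    if ( ctxt->count < MSR_SAVE_LOAD_MAX )
    {
        ctxt->msr[ctxt->count].index = MSR_EXAMPLE;
        ctxt->msr[ctxt->count].val = v->arch.hvm_vcpu.example_msr;
        ctxt->count++;
    }
}

static int hvm_load_msr_common(struct vcpu *v, struct hvm_msr *ctxt)
{
    unsigned int i;

    for ( i = 0; i < ctxt->count; i++ )
    {
        switch ( ctxt->msr[i].index )
        {
        case MSR_EXAMPLE:
            v->arch.hvm_vcpu.example_msr = ctxt->msr[i].val;
            break;
        default:
            /* Reject records containing MSRs this hypervisor doesn't know. */
            return -EINVAL;
        }
    }

    return 0;
}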
>>> On 02.12.13 at 09:52, "Liu, Jinsong" <jinsong.liu@intel.com> wrote:
> --- a/xen/include/public/arch-x86/hvm/save.h
> +++ b/xen/include/public/arch-x86/hvm/save.h
> @@ -592,9 +592,22 @@ struct hvm_tsc_adjust {
> 
>  DECLARE_HVM_SAVE_TYPE(TSC_ADJUST, 19, struct hvm_tsc_adjust);
> 
> +#define MSR_SAVE_LOAD_MAX 16

Please don't - let's keep this flexible, with dynamic sizing similar
to how the variable size XSAVE record is being dealt with.

Jan

> +struct msr_save_load {
> +    uint32_t index;
> +    uint64_t val;
> +};
> +
> +struct hvm_msr {
> +    unsigned int count;
> +    struct msr_save_load msr[MSR_SAVE_LOAD_MAX];
> +};
> +
> +DECLARE_HVM_SAVE_TYPE(HVM_MSR, 20, struct hvm_msr);
> +
>  /* 
>   * Largest type-code in use
>   */
> -#define HVM_SAVE_CODE_MAX 19
> +#define HVM_SAVE_CODE_MAX 20
> 
>  #endif /* __XEN_PUBLIC_HVM_SAVE_X86_H__ */
Jan Beulich wrote:
>>>> On 02.12.13 at 09:52, "Liu, Jinsong" <jinsong.liu@intel.com> wrote:
>> --- a/xen/include/public/arch-x86/hvm/save.h
>> +++ b/xen/include/public/arch-x86/hvm/save.h
>> @@ -592,9 +592,22 @@ struct hvm_tsc_adjust {
>> 
>>  DECLARE_HVM_SAVE_TYPE(TSC_ADJUST, 19, struct hvm_tsc_adjust);
>> 
>> +#define MSR_SAVE_LOAD_MAX 16
> 
> Please don't - let's keep this flexible, with dynamic sizing similar
> to how the variable size XSAVE record is being dealt with.
> 

Sorry, I didn't figure out how to, considering the format requirements
of hvm_save_entry()/hvm_load_entry() and the macro.

XSAVE itself is not variable size -- it allocates at init per the max
feature size requirement (but dynamically xsaves/xrstors only part of
the buffer).

Except for this point, I'm OK with the other comments.

Thanks,
Jinsong

> 
>> +struct msr_save_load {
>> +    uint32_t index;
>> +    uint64_t val;
>> +};
>> +
>> +struct hvm_msr {
>> +    unsigned int count;
>> +    struct msr_save_load msr[MSR_SAVE_LOAD_MAX];
>> +};
>> +
>> +DECLARE_HVM_SAVE_TYPE(HVM_MSR, 20, struct hvm_msr);
>> +
>>  /* 
>>   * Largest type-code in use
>>   */
>> -#define HVM_SAVE_CODE_MAX 19
>> +#define HVM_SAVE_CODE_MAX 20
>> 
>>  #endif /* __XEN_PUBLIC_HVM_SAVE_X86_H__ */
>>> On 02.12.13 at 13:12, "Liu, Jinsong" <jinsong.liu@intel.com> wrote:
> Jan Beulich wrote:
>>>>> On 02.12.13 at 09:52, "Liu, Jinsong" <jinsong.liu@intel.com> wrote:
>>> --- a/xen/include/public/arch-x86/hvm/save.h
>>> +++ b/xen/include/public/arch-x86/hvm/save.h
>>> @@ -592,9 +592,22 @@ struct hvm_tsc_adjust {
>>> 
>>>  DECLARE_HVM_SAVE_TYPE(TSC_ADJUST, 19, struct hvm_tsc_adjust);
>>> 
>>> +#define MSR_SAVE_LOAD_MAX 16
>> 
>> Please don't - let's keep this flexible, with dynamic sizing similar
>> to how the variable size XSAVE record is being dealt with.
>> 
> 
> Sorry, I didn't figure out how to, considering the format requirements
> of hvm_save_entry()/hvm_load_entry() and the macro.
> 
> XSAVE itself is not variable size -- it allocates at init per the max
> feature size requirement (but dynamically xsaves/xrstors only part of
> the buffer).

And you could do the same - collect at boot time how many MSRs
you might need to save at most (paralleling
HVM_CPU_XSAVE_SIZE(xfeature_mask)), but save only as much as
is needed (paralleling HVM_CPU_XSAVE_SIZE(v->arch.xcr0_accum)).

Jan
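
[Editor's illustration: a rough sketch of the dynamic sizing Jan
describes here, by analogy with the XSAVE record. The names
struct hvm_one_msr, HVM_MSR_SIZE() and msr_count_max are illustrative
placeholders, not existing Xen symbols.]

/* Variable-length record: sized by how many entries are present. */
struct hvm_msr {
    uint32_t count;
    struct hvm_one_msr {
        uint32_t index;
        uint32_t _rsvd;
        uint64_t val;
    } msr[1 /* variable length */];
};

/* Bytes needed for a record carrying 'cnt' MSR entries. */
#define HVM_MSR_SIZE(cnt) \
    (offsetof(struct hvm_msr, msr) + (cnt) * sizeof(struct hvm_one_msr))

/*
 * Worst case, collected once at boot (parallels
 * HVM_CPU_XSAVE_SIZE(xfeature_mask)); the save path later advances
 * the stream by only HVM_MSR_SIZE(ctxt->count) (parallels
 * HVM_CPU_XSAVE_SIZE(v->arch.xcr0_accum)).
 */
static unsigned int __read_mostly msr_count_max;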
Jan Beulich wrote:
>>>> On 02.12.13 at 13:12, "Liu, Jinsong" <jinsong.liu@intel.com> wrote:
>> Jan Beulich wrote:
>>>>>> On 02.12.13 at 09:52, "Liu, Jinsong" <jinsong.liu@intel.com> wrote:
>>>> --- a/xen/include/public/arch-x86/hvm/save.h
>>>> +++ b/xen/include/public/arch-x86/hvm/save.h
>>>> @@ -592,9 +592,22 @@ struct hvm_tsc_adjust {
>>>> 
>>>>  DECLARE_HVM_SAVE_TYPE(TSC_ADJUST, 19, struct hvm_tsc_adjust);
>>>> 
>>>> +#define MSR_SAVE_LOAD_MAX 16
>>> 
>>> Please don't - let's keep this flexible, with dynamic sizing similar
>>> to how the variable size XSAVE record is being dealt with.
>>> 
>> 
>> Sorry, I didn't figure out how to, considering the format
>> requirements of hvm_save_entry()/hvm_load_entry() and the macro.
>> 
>> XSAVE itself is not variable size -- it allocates at init per the
>> max feature size requirement (but dynamically xsaves/xrstors only
>> part of the buffer).
> 
> And you could do the same - collect at boot time how many MSRs
> you might need to save at most (paralleling
> HVM_CPU_XSAVE_SIZE(xfeature_mask)), but save only as much as
> is needed (paralleling HVM_CPU_XSAVE_SIZE(v->arch.xcr0_accum)).
> 

It seems there is hardly a generic way to collect which MSRs need
save/restore; however, it's in fact quite easy for the programmer who
adds the save/restore code for a specific MSR. Based on this we
implemented patches which remove the non-extensible
MSR_SAVE_LOAD_MAX. The patches will be sent out later.

Thanks,
Jinsong
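
[Editor's illustration: one plausible shape for the save side of those
follow-up patches -- a sketch only, assuming the stream helpers
(_hvm_init_entry(), h->data/h->cur) behave as in xen/common/hvm/save.c
and reusing the illustrative HVM_MSR_SIZE()/msr_count_max from the
previous sketch. hvm_msr_count_max collection at boot is not shown.]

static int hvm_save_msr(struct domain *d, hvm_domain_context_t *h)
{
    struct vcpu *v;

    for_each_vcpu ( d, v )
    {
        struct hvm_msr *ctxt;

        /* Reserve room in the stream for the worst case... */
        if ( _hvm_init_entry(h, HVM_SAVE_CODE(HVM_MSR), v->vcpu_id,
                             HVM_MSR_SIZE(msr_count_max)) )
            return -ENOSPC;
        ctxt = (struct hvm_msr *)&h->data[h->cur];
        ctxt->count = 0;

        hvm_save_msr_common(v, ctxt);
        if ( hvm_funcs.save_msr )
            hvm_funcs.save_msr(v, ctxt);

        /*
         * ...but only account for the entries actually written.  (A
         * real version would also trim the descriptor's recorded
         * length to match.)
         */
        h->cur += HVM_MSR_SIZE(ctxt->count);
    }

    return 0;
}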