<suravee.suthikulpanit@amd.com>
2013-Aug-05 08:31 UTC
[PATCH 1/1 V5] x86/AMD: Fix nested svm crash due to assertion in __virt_to_maddr
From: Suravee Suthikulpanit <suravee.suthikulpanit@amd.com> Fix assertion in __virt_to_maddr when starting nested SVM guest in debug mode. Investigation has shown that svm_vmsave/svm_vmload make use of __pa() with invalid address. Signed-off-by: Suravee Suthikulpanit <suravee.suthikulpanit@amd.com> --- Changes from V4: - Return #GP instead of #UD when fail to map nested VMCB (per Tim suggestion) - Rename the funtion to "nsvm_get_nvmcb_page" (per Tim/Christoph suggestion) - Use page_to_maddr instead of page_to_mfn (per Tim suggestiong) xen/arch/x86/hvm/svm/svm.c | 70 ++++++++++++++++++++++++++++--------- xen/include/asm-x86/hvm/svm/svm.h | 11 +++--- 2 files changed, 60 insertions(+), 21 deletions(-) diff --git a/xen/arch/x86/hvm/svm/svm.c b/xen/arch/x86/hvm/svm/svm.c index 4cc4b15..40c3e15 100644 --- a/xen/arch/x86/hvm/svm/svm.c +++ b/xen/arch/x86/hvm/svm/svm.c @@ -1779,15 +1779,15 @@ static void svm_vmexit_do_vmrun(struct cpu_user_regs *regs, struct vcpu *v, uint64_t vmcbaddr) { - if (!nestedhvm_enabled(v->domain)) { + if ( !nestedhvm_enabled(v->domain) || !hvm_svm_enabled(v) ) { gdprintk(XENLOG_ERR, "VMRUN: nestedhvm disabled, injecting #UD\n"); hvm_inject_hw_exception(TRAP_invalid_op, HVM_DELIVER_NO_ERROR_CODE); return; } - if (!nestedsvm_vmcb_map(v, vmcbaddr)) { - gdprintk(XENLOG_ERR, "VMRUN: mapping vmcb failed, injecting #UD\n"); - hvm_inject_hw_exception(TRAP_invalid_op, HVM_DELIVER_NO_ERROR_CODE); + if ( !nestedsvm_vmcb_map(v, vmcbaddr) ) { + gdprintk(XENLOG_ERR, "VMRUN: mapping vmcb failed, injecting #GP\n"); + hvm_inject_hw_exception(TRAP_gp_fault, HVM_DELIVER_NO_ERROR_CODE); return; } @@ -1795,6 +1795,32 @@ svm_vmexit_do_vmrun(struct cpu_user_regs *regs, return; } +static struct page_info * +nsvm_get_nvmcb_page(struct vcpu *v, uint64_t vmcbaddr) +{ + p2m_type_t p2mt; + struct page_info *page; + struct nestedvcpu *nv = &vcpu_nestedhvm(v); + + if (!nestedsvm_vmcb_map(v, vmcbaddr)) + return NULL; + + /* Need to translate L1-GPA to MPA */ + page = 
get_page_from_gfn(v->domain, + nv->nv_vvmcxaddr >> PAGE_SHIFT, + &p2mt, P2M_ALLOC | P2M_UNSHARE); + if ( !page ) + return NULL; + + if ( !p2m_is_ram(p2mt) || p2m_is_readonly(p2mt) ) + { + put_page(page); + return NULL; + } + + return page; +} + static void svm_vmexit_do_vmload(struct vmcb_struct *vmcb, struct cpu_user_regs *regs, @@ -1802,24 +1828,30 @@ svm_vmexit_do_vmload(struct vmcb_struct *vmcb, { int ret; unsigned int inst_len; - struct nestedvcpu *nv = &vcpu_nestedhvm(v); + struct page_info *page; if ( (inst_len = __get_instruction_length(v, INSTR_VMLOAD)) == 0 ) return; - if (!nestedhvm_enabled(v->domain)) { + if ( !nestedhvm_enabled(v->domain) || !hvm_svm_enabled(v) ) + { gdprintk(XENLOG_ERR, "VMLOAD: nestedhvm disabled, injecting #UD\n"); ret = TRAP_invalid_op; goto inject; } - if (!nestedsvm_vmcb_map(v, vmcbaddr)) { - gdprintk(XENLOG_ERR, "VMLOAD: mapping vmcb failed, injecting #UD\n"); - ret = TRAP_invalid_op; + page = nsvm_get_nvmcb_page(v, vmcbaddr); + if ( !page ) + { + gdprintk(XENLOG_ERR, + "VMLOAD: mapping failed, injecting #GP\n"); + ret = TRAP_gp_fault; goto inject; } - svm_vmload(nv->nv_vvmcx); + svm_vmload_pa(page_to_maddr(page)); + put_page(page); + /* State in L1 VMCB is stale now */ v->arch.hvm_svm.vmcb_in_sync = 0; @@ -1838,25 +1870,29 @@ svm_vmexit_do_vmsave(struct vmcb_struct *vmcb, { int ret; unsigned int inst_len; - struct nestedvcpu *nv = &vcpu_nestedhvm(v); + struct page_info *page; if ( (inst_len = __get_instruction_length(v, INSTR_VMSAVE)) == 0 ) return; - if (!nestedhvm_enabled(v->domain)) { + if ( !nestedhvm_enabled(v->domain) || !hvm_svm_enabled(v) ) + { gdprintk(XENLOG_ERR, "VMSAVE: nestedhvm disabled, injecting #UD\n"); ret = TRAP_invalid_op; goto inject; } - if (!nestedsvm_vmcb_map(v, vmcbaddr)) { - gdprintk(XENLOG_ERR, "VMSAVE: mapping vmcb failed, injecting #UD\n"); - ret = TRAP_invalid_op; + page = nsvm_get_nvmcb_page(v, vmcbaddr); + if ( !page ) + { + gdprintk(XENLOG_ERR, + "VMSAVE: mapping vmcb failed, injecting #GP\n"); 
+ ret = TRAP_gp_fault; goto inject; } - svm_vmsave(nv->nv_vvmcx); - + svm_vmsave_pa(page_to_maddr(page)); + put_page(page); __update_guest_eip(regs, inst_len); return; diff --git a/xen/include/asm-x86/hvm/svm/svm.h b/xen/include/asm-x86/hvm/svm/svm.h index 64e7e25..1ffe6d6 100644 --- a/xen/include/asm-x86/hvm/svm/svm.h +++ b/xen/include/asm-x86/hvm/svm/svm.h @@ -41,18 +41,21 @@ #define SVM_REG_R14 (14) #define SVM_REG_R15 (15) -static inline void svm_vmload(void *vmcb) +#define svm_vmload(x) svm_vmload_pa(__pa(x)) +#define svm_vmsave(x) svm_vmsave_pa(__pa(x)) + +static inline void svm_vmload_pa(paddr_t vmcb) { asm volatile ( ".byte 0x0f,0x01,0xda" /* vmload */ - : : "a" (__pa(vmcb)) : "memory" ); + : : "a" (vmcb) : "memory" ); } -static inline void svm_vmsave(void *vmcb) +static inline void svm_vmsave_pa(paddr_t vmcb) { asm volatile ( ".byte 0x0f,0x01,0xdb" /* vmsave */ - : : "a" (__pa(vmcb)) : "memory" ); + : : "a" (vmcb) : "memory" ); } static inline void svm_invlpga(unsigned long vaddr, uint32_t asid) -- 1.7.10.4
Jan Beulich
2013-Aug-07 13:17 UTC
Re: [PATCH 1/1 V5] x86/AMD: Fix nested svm crash due to assertion in __virt_to_maddr
>>> On 05.08.13 at 10:31, <suravee.suthikulpanit@amd.com> wrote: > From: Suravee Suthikulpanit <suravee.suthikulpanit@amd.com> > > Fix assertion in __virt_to_maddr when starting nested SVM guest > in debug mode. Investigation has shown that svm_vmsave/svm_vmload > make use of __pa() with invalid address. > > Signed-off-by: Suravee Suthikulpanit <suravee.suthikulpanit@amd.com>Tim - have all your earlier comments been addressed in this version?> --- a/xen/arch/x86/hvm/svm/svm.c > +++ b/xen/arch/x86/hvm/svm/svm.c > @@ -1779,15 +1779,15 @@ static void > svm_vmexit_do_vmrun(struct cpu_user_regs *regs, > struct vcpu *v, uint64_t vmcbaddr) > { > - if (!nestedhvm_enabled(v->domain)) { > + if ( !nestedhvm_enabled(v->domain) || !hvm_svm_enabled(v) ) {Suravee, why is this change needed (here and further down)? Can we really get here when hvm_svm_enabled(v) returns false? I don''t recall this having been there in earlier versions. Also, if the change _is_ needed, just like done further down you should have taken the opportunity and fix the placement of the closing brace (also again later in this function).> +static struct page_info * > +nsvm_get_nvmcb_page(struct vcpu *v, uint64_t vmcbaddr) > +{ > + p2m_type_t p2mt; > + struct page_info *page; > + struct nestedvcpu *nv = &vcpu_nestedhvm(v); > + > + if (!nestedsvm_vmcb_map(v, vmcbaddr))Coding style.> + return NULL;Hard tab. Jan
Suravee Suthikulanit
2013-Aug-07 22:18 UTC
Re: [PATCH 1/1 V5] x86/AMD: Fix nested svm crash due to assertion in __virt_to_maddr
On 8/7/2013 8:17 AM, Jan Beulich wrote:>>>> On 05.08.13 at 10:31, <suravee.suthikulpanit@amd.com> wrote: >> From: Suravee Suthikulpanit <suravee.suthikulpanit@amd.com> >> >> Fix assertion in __virt_to_maddr when starting nested SVM guest >> in debug mode. Investigation has shown that svm_vmsave/svm_vmload >> make use of __pa() with invalid address. >> >> Signed-off-by: Suravee Suthikulpanit <suravee.suthikulpanit@amd.com> > Tim - have all your earlier comments been addressed in this version? > >> --- a/xen/arch/x86/hvm/svm/svm.c >> +++ b/xen/arch/x86/hvm/svm/svm.c >> @@ -1779,15 +1779,15 @@ static void >> svm_vmexit_do_vmrun(struct cpu_user_regs *regs, >> struct vcpu *v, uint64_t vmcbaddr) >> { >> - if (!nestedhvm_enabled(v->domain)) { >> + if ( !nestedhvm_enabled(v->domain) || !hvm_svm_enabled(v) ) { > Suravee, why is this change needed (here and further down)? > Can we really get here when hvm_svm_enabled(v) returns false? > I don''t recall this having been there in earlier versions.Basically, I double checked the logic for all the svm_vmexit_do_vmxxx to make sure that the proper exception has been raised. We had a discussion whether it should returned #GP or #UD. In this case, if the L1 vcpu does not have SVME bit in the EFER set, it should return #UD. Otherewise, it should return #GP. Here the hvm_svm_enabled(v) is return true when L1 guest enabled SVM in EFER. #define hvm_svm_enabled(v) (!!((v)->arch.hvm_vcpu.guest_efer & EFER_SVME)) So, I decided to add the check here as well. 
Unless you think it is not necessary.> Also, if the change _is_ needed, just like done further down you > should have taken the opportunity and fix the placement of the > closing brace (also again later in this function).Will take care of that if needed.> >> +static struct page_info * >> +nsvm_get_nvmcb_page(struct vcpu *v, uint64_t vmcbaddr) >> +{ >> + p2m_type_t p2mt; >> + struct page_info *page; >> + struct nestedvcpu *nv = &vcpu_nestedhvm(v); >> + >> + if (!nestedsvm_vmcb_map(v, vmcbaddr)) > Coding style.OK> >> + return NULL; > Hard tab. > > JanOK Thanks, Suravee
Jan Beulich
2013-Aug-08 06:47 UTC
Re: [PATCH 1/1 V5] x86/AMD: Fix nested svm crash due to assertion in __virt_to_maddr
>>> On 08.08.13 at 00:18, Suravee Suthikulanit <suravee.suthikulpanit@amd.com> wrote: > On 8/7/2013 8:17 AM, Jan Beulich wrote: >>>>> On 05.08.13 at 10:31, <suravee.suthikulpanit@amd.com> wrote: >>> --- a/xen/arch/x86/hvm/svm/svm.c >>> +++ b/xen/arch/x86/hvm/svm/svm.c >>> @@ -1779,15 +1779,15 @@ static void >>> svm_vmexit_do_vmrun(struct cpu_user_regs *regs, >>> struct vcpu *v, uint64_t vmcbaddr) >>> { >>> - if (!nestedhvm_enabled(v->domain)) { >>> + if ( !nestedhvm_enabled(v->domain) || !hvm_svm_enabled(v) ) { >> Suravee, why is this change needed (here and further down)? >> Can we really get here when hvm_svm_enabled(v) returns false? >> I don''t recall this having been there in earlier versions. > > Basically, I double checked the logic for all the svm_vmexit_do_vmxxx to > make sure > that the proper exception has been raised. We had a discussion whether it > should > returned #GP or #UD. In this case, if the L1 vcpu does not have SVME > bit in the EFER set, it should return #UD. Otherewise, it should return #GP. > > Here the hvm_svm_enabled(v) is return true when L1 guest enabled SVM in > EFER. > > #define hvm_svm_enabled(v) (!!((v)->arch.hvm_vcpu.guest_efer & EFER_SVME)) > > So, I decided to add the check here as well. Unless you think it is not > necessary.I don''t know enough about the nested HVM state handling to be certain it''s not needed. The change just looks bogus in the context of the subject of the patch, along with the need for it not being explained in the patch description. And of course it doesn''t help things that no prior uses of hvm_svm_enabled() exist in the tree (which I didn''t realize before, as the name doesn''t even suggest this is a nested-only construct)... So either you fully explain in the patch description why the change is necessary/correct _here_, or (better imo) you break it out into a separate patch (making it obvious that then patch title and/or description will make clear why the change is needed). 
In any case - explaining how nestedhvm_enabled() could end up returning a value different from hvm_svm_enabled() would help my understanding. Jan
Tim Deegan
2013-Aug-08 09:38 UTC
Re: [PATCH 1/1 V5] x86/AMD: Fix nested svm crash due to assertion in __virt_to_maddr
At 14:17 +0100 on 07 Aug (1375885025), Jan Beulich wrote:> >>> On 05.08.13 at 10:31, <suravee.suthikulpanit@amd.com> wrote: > > From: Suravee Suthikulpanit <suravee.suthikulpanit@amd.com> > > > > Fix assertion in __virt_to_maddr when starting nested SVM guest > > in debug mode. Investigation has shown that svm_vmsave/svm_vmload > > make use of __pa() with invalid address. > > > > Signed-off-by: Suravee Suthikulpanit <suravee.suthikulpanit@amd.com> > > Tim - have all your earlier comments been addressed in this version?Yes, I'm happy with this one. Reviewed-by: Tim Deegan <tim@xen.org>> > - if (!nestedhvm_enabled(v->domain)) { > > + if ( !nestedhvm_enabled(v->domain) || !hvm_svm_enabled(v) ) { > > Suravee, why is this change needed (here and further down)? > Can we really get here when hvm_svm_enabled(v) returns false? > I don't recall this having been there in earlier versions.This came from discussion of what fault to inject -- we always intercept VM{RUN,LOAD,SAVE} so I think we can get here. The AMD docs for those say: "Checks exceptions (#GP) before the intercept." but nothing about checking guest_efer.SVME so AFAICT we have to do that in Xen. Arguably this fix could be a separate patch. Certainly the same check ought to go into svm_exit_do_vmrun(). Tim.
Suravee Suthikulanit
2013-Aug-08 15:55 UTC
Re: [PATCH 1/1 V5] x86/AMD: Fix nested svm crash due to assertion in __virt_to_maddr
On 8/8/2013 1:47 AM, Jan Beulich wrote:>>>> On 08.08.13 at 00:18, Suravee Suthikulanit <suravee.suthikulpanit@amd.com> wrote: >> On 8/7/2013 8:17 AM, Jan Beulich wrote: >>>>>> On 05.08.13 at 10:31, <suravee.suthikulpanit@amd.com> wrote: >>>> --- a/xen/arch/x86/hvm/svm/svm.c >>>> +++ b/xen/arch/x86/hvm/svm/svm.c >>>> @@ -1779,15 +1779,15 @@ static void >>>> svm_vmexit_do_vmrun(struct cpu_user_regs *regs, >>>> struct vcpu *v, uint64_t vmcbaddr) >>>> { >>>> - if (!nestedhvm_enabled(v->domain)) { >>>> + if ( !nestedhvm_enabled(v->domain) || !hvm_svm_enabled(v) ) { >>> Suravee, why is this change needed (here and further down)? >>> Can we really get here when hvm_svm_enabled(v) returns false? >>> I don''t recall this having been there in earlier versions. >> Basically, I double checked the logic for all the svm_vmexit_do_vmxxx to >> make sure >> that the proper exception has been raised. We had a discussion whether it >> should >> returned #GP or #UD. In this case, if the L1 vcpu does not have SVME >> bit in the EFER set, it should return #UD. Otherewise, it should return #GP. >> >> Here the hvm_svm_enabled(v) is return true when L1 guest enabled SVM in >> EFER. >> >> #define hvm_svm_enabled(v) (!!((v)->arch.hvm_vcpu.guest_efer & EFER_SVME)) >> >> So, I decided to add the check here as well. Unless you think it is not >> necessary. > I don''t know enough about the nested HVM state handling to be > certain it''s not needed. The change just looks bogus in the > context of the subject of the patch, along with the need for it not > being explained in the patch description.I''ll separate the patch to make it more clear.> And of course it doesn''t help things that no prior uses of hvm_svm_enabled() exist in the > tree (which I didn''t realize before, as the name doesn''t even > suggest this is a nested-only construct)...This macro was used in the past. However, the code was removed, but the macro still exist. 
I'll update the macro name.> So either you fully explain in the patch description why the change > is necessary/correct _here_, or (better imo) you break it out into a > separate patch (making it obvious that then patch title and/or > description will make clear why the change is needed).I'll update the description. Suravee
Suravee Suthikulanit
2013-Aug-08 16:42 UTC
Re: [PATCH 1/1 V5] x86/AMD: Fix nested svm crash due to assertion in __virt_to_maddr
On 8/8/2013 4:38 AM, Tim Deegan wrote:> At 14:17 +0100 on 07 Aug (1375885025), Jan Beulich wrote: >>>>> On 05.08.13 at 10:31, <suravee.suthikulpanit@amd.com> wrote: >>> From: Suravee Suthikulpanit <suravee.suthikulpanit@amd.com> >>> >>> Fix assertion in __virt_to_maddr when starting nested SVM guest >>> in debug mode. Investigation has shown that svm_vmsave/svm_vmload >>> make use of __pa() with invalid address. >>> >>> Signed-off-by: Suravee Suthikulpanit <suravee.suthikulpanit@amd.com> >> Tim - have all your earlier comments been addressed in this version? > Yes, I''m happy with this one. > > Reviewed-by: Tim Deegan <tim@xen.org> > >>> - if (!nestedhvm_enabled(v->domain)) { >>> + if ( !nestedhvm_enabled(v->domain) || !hvm_svm_enabled(v) ) { >> Suravee, why is this change needed (here and further down)? >> Can we really get here when hvm_svm_enabled(v) returns false? >> I don''t recall this having been there in earlier versions. > This came from discussion of what fault to inject -- we always intercept > VM{RUN,LOAD,SAVE} so I think we can get here. The AMD docs for those say: > "Checks exceptions (#GP) before the intercept." > but nothing about checking guest_efer.SVME so AFAICT we have to do that > in Xen. > > Arguably this fix could could be a separate patch. Certainly the same > check ought to go into svm_exit_do_vmrun(). > > Tim. >Here, the "nestedhvm_enabled(v->domain)" is implemented as /* Nested HVM on/off per domain */ bool_t nestedhvm_enabled(struct domain *d) { return is_hvm_domain(d) && d->arch.hvm_domain.params[HVM_PARAM_NESTEDHVM]; } I''m not familiar with this, but I believe this is the option in the HVM config file"nestedhvm=1". Suravee _______________________________________________ Xen-devel mailing list Xen-devel@lists.xen.org http://lists.xen.org/xen-devel
Egger, Christoph
2013-Aug-12 08:57 UTC
Re: [PATCH 1/1 V5] x86/AMD: Fix nested svm crash due to assertion in __virt_to_maddr
On 08.08.13 08:47, Jan Beulich wrote:> In any case - explaining how nestedhvm_enabled() could end up > returning a value different from hvm_svm_enabled() would help > my understanding.nestedhvm_enabled() returns true when 'nestedhvm=1' in the guest config file. hvm_svm_enabled() returns true when the hvm guest enabled SVM in EFER. Christoph
Jan Beulich
2013-Aug-12 09:01 UTC
Re: [PATCH 1/1 V5] x86/AMD: Fix nested svm crash due to assertion in __virt_to_maddr
>>> On 12.08.13 at 10:57, "Egger, Christoph" <chegger@amazon.de> wrote: > On 08.08.13 08:47, Jan Beulich wrote: >> In any case - explaining how nestedhvm_enabled() could end up >> returning a value different from hvm_svm_enabled() would help >> my understanding. > > nestedhvm_enabled() returns true when ''nestedhvm=1'' in the > guest config file. > > hvm_svm_enabled() returns true when the hvm guest enabled SVM > in EFER.And the guest should certainly be disallowed to enable SVM in EFER when nestedhvm was not 1 in the config file. Or am I missing something here? Jan
Egger, Christoph
2013-Aug-12 11:13 UTC
Re: [PATCH 1/1 V5] x86/AMD: Fix nested svm crash due to assertion in __virt_to_maddr
On 12.08.13 11:01, Jan Beulich wrote:>>>> On 12.08.13 at 10:57, "Egger, Christoph" <chegger@amazon.de> wrote: >> On 08.08.13 08:47, Jan Beulich wrote: >>> In any case - explaining how nestedhvm_enabled() could end up >>> returning a value different from hvm_svm_enabled() would help >>> my understanding. >> >> nestedhvm_enabled() returns true when 'nestedhvm=1' in the >> guest config file. >> >> hvm_svm_enabled() returns true when the hvm guest enabled SVM >> in EFER. > > And the guest should certainly be disallowed to enable SVM in > EFER when nestedhvm was not 1 in the config file.That's correct. The guest should also never see SVM available via cpuid. The same applies analogously to VMX on Intel. Christoph
Jan Beulich
2013-Aug-12 13:18 UTC
Re: [PATCH 1/1 V5] x86/AMD: Fix nested svm crash due to assertion in __virt_to_maddr
>>> On 12.08.13 at 13:13, "Egger, Christoph" <chegger@amazon.de> wrote: > On 12.08.13 11:01, Jan Beulich wrote: >>>>> On 12.08.13 at 10:57, "Egger, Christoph" <chegger@amazon.de> wrote: >>> On 08.08.13 08:47, Jan Beulich wrote: >>>> In any case - explaining how nestedhvm_enabled() could end up >>>> returning a value different from hvm_svm_enabled() would help >>>> my understanding. >>> >>> nestedhvm_enabled() returns true when ''nestedhvm=1'' in the >>> guest config file. >>> >>> hvm_svm_enabled() returns true when the hvm guest enabled SVM >>> in EFER. >> >> And the guest should certainly be disallowed to enable SVM in >> EFER when nestedhvm was not 1 in the config file. > > That''s correct. The guest should also never see SVM available via > cpuid. > Analogous same regarding VMX on Intel.So Suravee, bottom line from this is: Replace the prior checks instead of adding the new ones. Jan
Suravee Suthikulpanit
2013-Aug-12 14:04 UTC
Re: [PATCH 1/1 V5] x86/AMD: Fix nested svm crash due to assertion in __virt_to_maddr
On 8/12/2013 8:18 AM, Jan Beulich wrote:>>>> On 12.08.13 at 13:13, "Egger, Christoph" <chegger@amazon.de> wrote: >> On 12.08.13 11:01, Jan Beulich wrote: >>>>>> On 12.08.13 at 10:57, "Egger, Christoph" <chegger@amazon.de> wrote: >>>> On 08.08.13 08:47, Jan Beulich wrote: >>>>> In any case - explaining how nestedhvm_enabled() could end up >>>>> returning a value different from hvm_svm_enabled() would help >>>>> my understanding. >>>> nestedhvm_enabled() returns true when 'nestedhvm=1' in the >>>> guest config file. >>>> >>>> hvm_svm_enabled() returns true when the hvm guest enabled SVM >>>> in EFER. >>> And the guest should certainly be disallowed to enable SVM in >>> EFER when nestedhvm was not 1 in the config file. >> That's correct. The guest should also never see SVM available via >> cpuid. >> Analogous same regarding VMX on Intel. > So Suravee, bottom line from this is: Replace the prior checks > instead of adding the new ones. > > Jan > >Ok... I will replace the hvm_svm_enabled() to check the EFER.SVME bit instead. I sent out the V6 on Friday which I have separated the patch into two. Would you mind taking one last quick look. Thank you, Suravee _______________________________________________ Xen-devel mailing list Xen-devel@lists.xen.org http://lists.xen.org/xen-devel
Jan Beulich
2013-Aug-12 14:26 UTC
Re: [PATCH 1/1 V5] x86/AMD: Fix nested svm crash due to assertion in __virt_to_maddr
>>> On 12.08.13 at 16:04, Suravee Suthikulpanit <suravee.suthikulpanit@amd.com> wrote: > Ok... I will replace the hvm_svm_enabled() to check the EFER.SVME bit > instead. > I sent out the V6 on Friday which I have separated the patch into two. > Would you mind taking one last quick look.It looked mostly fine; I took note of removing two unrelated changes of this kind - if (!nestedhvm_enabled(v->domain)) { + if ( !nestedhvm_enabled(v->domain) ) + { out of that patch into the second one, which touches those lines anyway. As you're going to do another round, please do this fixup as you go. Jan
Egger, Christoph
2013-Aug-12 14:40 UTC
Re: [PATCH 1/1 V5] x86/AMD: Fix nested svm crash due to assertion in __virt_to_maddr
On 12.08.13 16:04, Suravee Suthikulpanit wrote:> On 8/12/2013 8:18 AM, Jan Beulich wrote: >>>>> On 12.08.13 at 13:13, "Egger, Christoph" <chegger@amazon.de> wrote: >>> On 12.08.13 11:01, Jan Beulich wrote: >>>>>>> On 12.08.13 at 10:57, "Egger, Christoph" <chegger@amazon.de> wrote: >>>>> On 08.08.13 08:47, Jan Beulich wrote: >>>>>> In any case - explaining how nestedhvm_enabled() could end up >>>>>> returning a value different from hvm_svm_enabled() would help >>>>>> my understanding. >>>>> nestedhvm_enabled() returns true when 'nestedhvm=1' in the >>>>> guest config file. >>>>> >>>>> hvm_svm_enabled() returns true when the hvm guest enabled SVM >>>>> in EFER. >>>> And the guest should certainly be disallowed to enable SVM in >>>> EFER when nestedhvm was not 1 in the config file. >>> That's correct. The guest should also never see SVM available via >>> cpuid. >>> Analogous same regarding VMX on Intel. >> So Suravee, bottom line from this is: Replace the prior checks >> instead of adding the new ones. >> >> Jan >> >> > Ok... I will replace the hvm_svm_enabled() to check the EFER.SVME bit > instead. > I sent out the V6 on Friday which I have separated the patch into two. > Would you mind taking one last quick look.Looking into how hvm_svm_enabled() is implemented ... /* True when l1 guest enabled SVM in EFER */ #define hvm_svm_enabled(v) \ (!!((v)->arch.hvm_vcpu.guest_efer & EFER_SVME)) ... it is already doing this. Christoph
Jan Beulich
2013-Aug-12 15:26 UTC
Re: [PATCH 1/1 V5] x86/AMD: Fix nested svm crash due to assertion in __virt_to_maddr
>>> On 12.08.13 at 16:40, "Egger, Christoph" <chegger@amazon.de> wrote: > On 12.08.13 16:04, Suravee Suthikulpanit wrote: >> On 8/12/2013 8:18 AM, Jan Beulich wrote: >>>>>> On 12.08.13 at 13:13, "Egger, Christoph" <chegger@amazon.de> wrote: >>>> On 12.08.13 11:01, Jan Beulich wrote: >>>>>>>> On 12.08.13 at 10:57, "Egger, Christoph" <chegger@amazon.de> wrote: >>>>>> On 08.08.13 08:47, Jan Beulich wrote: >>>>>>> In any case - explaining how nestedhvm_enabled() could end up >>>>>>> returning a value different from hvm_svm_enabled() would help >>>>>>> my understanding. >>>>>> nestedhvm_enabled() returns true when ''nestedhvm=1'' in the >>>>>> guest config file. >>>>>> >>>>>> hvm_svm_enabled() returns true when the hvm guest enabled SVM >>>>>> in EFER. >>>>> And the guest should certainly be disallowed to enable SVM in >>>>> EFER when nestedhvm was not 1 in the config file. >>>> That''s correct. The guest should also never see SVM available via >>>> cpuid. >>>> Analogous same regarding VMX on Intel. >>> So Suravee, bottom line from this is: Replace the prior checks >>> instead of adding the new ones. >>> >>> Jan >>> >>> >> Ok... I will replace the hvm_svm_enabled() to check the EFER.SVME bit >> instead. >> I sent out the V6 on Friday which I have separated the patch into two. >> Would you mind taking one last quick look. > > Looking into the how hvm_svm_enabled() is implemented ... > > /* True when l1 guest enabled SVM in EFER */ > #define hvm_svm_enabled(v) \ > (!!((v)->arch.hvm_vcpu.guest_efer & EFER_SVME)) > > ... it is already doing this.Right - my request also was to replace the nestedhvm_enabled() checks in question with hvm_svm_enabled() ones, rather than checking both. Jan