<suravee.suthikulpanit@amd.com>
2013-Jul-26 21:46 UTC
[PATCH 1/1 V4] x86/AMD: Fix nested svm crash due to assertion in __virt_to_maddr
From: Suravee Suthikulpanit <suravee.suthikulpanit@amd.com>

Fix assertion in __virt_to_maddr when starting nested SVM guest
in debug mode. Investigation has shown that svm_vmsave/svm_vmload
make use of __pa() with invalid address.

Signed-off-by: Suravee Suthikulpanit <suravee.suthikulpanit@amd.com>
---
Changes in V4:
    - Use get_page_from_gfn instead.

 xen/arch/x86/hvm/svm/svm.c        |   49 +++++++++++++++++++++++++++++++++++--
 xen/include/asm-x86/hvm/svm/svm.h |   11 ++++++---
 2 files changed, 54 insertions(+), 6 deletions(-)

diff --git a/xen/arch/x86/hvm/svm/svm.c b/xen/arch/x86/hvm/svm/svm.c
index 4cc4b15..e3b3cab 100644
--- a/xen/arch/x86/hvm/svm/svm.c
+++ b/xen/arch/x86/hvm/svm/svm.c
@@ -1795,6 +1795,27 @@ svm_vmexit_do_vmrun(struct cpu_user_regs *regs,
     return;
 }
 
+static struct page_info *
+_get_vmcb_page(struct domain *d, uint64_t vmcbaddr)
+{
+    struct page_info *page;
+    p2m_type_t p2mt;
+
+    page = get_page_from_gfn(d, vmcbaddr >> PAGE_SHIFT,
+                             &p2mt, P2M_ALLOC | P2M_UNSHARE);
+
+    if (!page)
+        return NULL;
+
+    if ( !p2m_is_ram(p2mt) || p2m_is_readonly(p2mt) )
+    {
+        put_page(page);
+        return NULL;
+    }
+
+    return page;
+}
+
 static void
 svm_vmexit_do_vmload(struct vmcb_struct *vmcb,
                      struct cpu_user_regs *regs,
@@ -1802,6 +1823,7 @@ svm_vmexit_do_vmload(struct vmcb_struct *vmcb,
 {
     int ret;
     unsigned int inst_len;
+    struct page_info *page;
     struct nestedvcpu *nv = &vcpu_nestedhvm(v);
 
     if ( (inst_len = __get_instruction_length(v, INSTR_VMLOAD)) == 0 )
@@ -1819,7 +1841,19 @@ svm_vmexit_do_vmload(struct vmcb_struct *vmcb,
         goto inject;
     }
 
-    svm_vmload(nv->nv_vvmcx);
+    /* Need to translate L1-GPA to MPA */
+    page = _get_vmcb_page(v->domain, nv->nv_vvmcxaddr);
+    if (!page)
+    {
+        gdprintk(XENLOG_ERR,
+            "VMLOAD: mapping vmcb L1-GPA to MPA failed, injecting #UD\n");
+        ret = TRAP_invalid_op;
+        goto inject;
+    }
+
+    svm_vmload_pa(page_to_mfn(page) << PAGE_SHIFT);
+    put_page(page);
+
     /* State in L1 VMCB is stale now */
     v->arch.hvm_svm.vmcb_in_sync = 0;
 
@@ -1838,6 +1872,7 @@ svm_vmexit_do_vmsave(struct vmcb_struct *vmcb,
 {
     int ret;
     unsigned int inst_len;
+    struct page_info *page;
     struct nestedvcpu *nv = &vcpu_nestedhvm(v);
 
     if ( (inst_len = __get_instruction_length(v, INSTR_VMSAVE)) == 0 )
@@ -1855,8 +1890,18 @@ svm_vmexit_do_vmsave(struct vmcb_struct *vmcb,
         goto inject;
     }
 
-    svm_vmsave(nv->nv_vvmcx);
+    /* Need to translate L1-GPA to MPA */
+    page = _get_vmcb_page(v->domain, nv->nv_vvmcxaddr);
+    if (!page)
+    {
+        gdprintk(XENLOG_ERR,
+            "VMSAVE: mapping vmcb L1-GPA to MPA failed, injecting #UD\n");
+        ret = TRAP_invalid_op;
+        goto inject;
+    }
 
+    svm_vmsave_pa(page_to_mfn(page) << PAGE_SHIFT);
+    put_page(page);
 
     __update_guest_eip(regs, inst_len);
     return;
diff --git a/xen/include/asm-x86/hvm/svm/svm.h b/xen/include/asm-x86/hvm/svm/svm.h
index 64e7e25..1ffe6d6 100644
--- a/xen/include/asm-x86/hvm/svm/svm.h
+++ b/xen/include/asm-x86/hvm/svm/svm.h
@@ -41,18 +41,21 @@
 #define SVM_REG_R14 (14)
 #define SVM_REG_R15 (15)
 
-static inline void svm_vmload(void *vmcb)
+#define svm_vmload(x)     svm_vmload_pa(__pa(x))
+#define svm_vmsave(x)     svm_vmsave_pa(__pa(x))
+
+static inline void svm_vmload_pa(paddr_t vmcb)
 {
     asm volatile (
         ".byte 0x0f,0x01,0xda" /* vmload */
-        : : "a" (__pa(vmcb)) : "memory" );
+        : : "a" (vmcb) : "memory" );
 }
 
-static inline void svm_vmsave(void *vmcb)
+static inline void svm_vmsave_pa(paddr_t vmcb)
 {
     asm volatile (
         ".byte 0x0f,0x01,0xdb" /* vmsave */
-        : : "a" (__pa(vmcb)) : "memory" );
+        : : "a" (vmcb) : "memory" );
 }
 
 static inline void svm_invlpga(unsigned long vaddr, uint32_t asid)
-- 
1.7.10.4
Tim Deegan
2013-Jul-29 10:43 UTC
Re: [PATCH 1/1 V4] x86/AMD: Fix nested svm crash due to assertion in __virt_to_maddr
Hi,

At 16:46 -0500 on 26 Jul (1374857167), suravee.suthikulpanit@amd.com wrote:
> From: Suravee Suthikulpanit <suravee.suthikulpanit@amd.com>
>
> Fix assertion in __virt_to_maddr when starting nested SVM guest
> in debug mode. Investigation has shown that svm_vmsave/svm_vmload
> make use of __pa() with invalid address.
>
> Signed-off-by: Suravee Suthikulpanit <suravee.suthikulpanit@amd.com>

This looks much better, but I have a few comments still:

> +static struct page_info *
> +_get_vmcb_page(struct domain *d, uint64_t vmcbaddr)

Can you give this a name that makes it clearer that it's for nested
VMCBs and not part of the handling of 'real' VMCBs?  Also, please drop
the leading underscore.

> +{
> +    struct page_info *page;
> +    p2m_type_t p2mt;
> +
> +    page = get_page_from_gfn(d, vmcbaddr >> PAGE_SHIFT,
> +                             &p2mt, P2M_ALLOC | P2M_UNSHARE);
> +
> +    if (!page)

Missing whitespace.

> +        return NULL;
> +
> +    if ( !p2m_is_ram(p2mt) || p2m_is_readonly(p2mt) )
> +    {
> +        put_page(page);
> +        return NULL;
> +    }
> +
> +    return page;
> +}
> +
>  static void
>  svm_vmexit_do_vmload(struct vmcb_struct *vmcb,
>                       struct cpu_user_regs *regs,
> @@ -1802,6 +1823,7 @@ svm_vmexit_do_vmload(struct vmcb_struct *vmcb,
>  {
>      int ret;
>      unsigned int inst_len;
> +    struct page_info *page;
>      struct nestedvcpu *nv = &vcpu_nestedhvm(v);
>
>      if ( (inst_len = __get_instruction_length(v, INSTR_VMLOAD)) == 0 )
> @@ -1819,7 +1841,19 @@ svm_vmexit_do_vmload(struct vmcb_struct *vmcb,
>          goto inject;
>      }
>
> -    svm_vmload(nv->nv_vvmcx);
> +    /* Need to translate L1-GPA to MPA */
> +    page = _get_vmcb_page(v->domain, nv->nv_vvmcxaddr);
> +    if (!page)

Whitespace.

> +    {
> +        gdprintk(XENLOG_ERR,
> +            "VMLOAD: mapping vmcb L1-GPA to MPA failed, injecting #UD\n");
> +        ret = TRAP_invalid_op;

The documentation for VMLOAD suggests TRAP_gp_fault for this case.

> +        goto inject;
> +    }
> +
> +    svm_vmload_pa(page_to_mfn(page) << PAGE_SHIFT);

Please use page_to_maddr() for this.

> +    put_page(page);
> +
>      /* State in L1 VMCB is stale now */
>      v->arch.hvm_svm.vmcb_in_sync = 0;
>
> @@ -1838,6 +1872,7 @@ svm_vmexit_do_vmsave(struct vmcb_struct *vmcb,
>  {
>      int ret;
>      unsigned int inst_len;
> +    struct page_info *page;
>      struct nestedvcpu *nv = &vcpu_nestedhvm(v);
>
>      if ( (inst_len = __get_instruction_length(v, INSTR_VMSAVE)) == 0 )
> @@ -1855,8 +1890,18 @@ svm_vmexit_do_vmsave(struct vmcb_struct *vmcb,
>          goto inject;
>      }
>
> -    svm_vmsave(nv->nv_vvmcx);
> +    /* Need to translate L1-GPA to MPA */
> +    page = _get_vmcb_page(v->domain, nv->nv_vvmcxaddr);
> +    if (!page)

Whitespace.

> +    {
> +        gdprintk(XENLOG_ERR,
> +            "VMSAVE: mapping vmcb L1-GPA to MPA failed, injecting #UD\n");
> +        ret = TRAP_invalid_op;
> +        goto inject;
> +    }
>
> +    svm_vmsave_pa(page_to_mfn(page) << PAGE_SHIFT);

Again, #GP, and page_to_maddr().

Thanks,

Tim.
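For illustration, a minimal sketch of the VMLOAD error path with both of the
above comments applied (page_to_maddr() and #GP), keeping the V4 helper name;
this is a sketch of the requested changes, not the posted patch:

    /* Sketch only: V4 error path with the two review comments applied. */
    page = _get_vmcb_page(v->domain, nv->nv_vvmcxaddr);
    if ( !page )
    {
        gdprintk(XENLOG_ERR,
                 "VMLOAD: mapping vmcb L1-GPA to MPA failed, injecting #GP\n");
        ret = TRAP_gp_fault;                /* #GP rather than #UD */
        goto inject;
    }

    svm_vmload_pa(page_to_maddr(page));     /* instead of page_to_mfn(page) << PAGE_SHIFT */
    put_page(page);

The VMSAVE path would change in the same way, with svm_vmsave_pa() in place of
svm_vmload_pa().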
Suravee Suthikulanit
2013-Jul-29 16:27 UTC
Re: [PATCH 1/1 V4] x86/AMD: Fix nested svm crash due to assertion in __virt_to_maddr
On 7/29/2013 5:43 AM, Tim Deegan wrote:
> Hi,
>
> At 16:46 -0500 on 26 Jul (1374857167), suravee.suthikulpanit@amd.com wrote:
>> From: Suravee Suthikulpanit <suravee.suthikulpanit@amd.com>
>>
>> Fix assertion in __virt_to_maddr when starting nested SVM guest
>> in debug mode. Investigation has shown that svm_vmsave/svm_vmload
>> make use of __pa() with invalid address.
>>
>> Signed-off-by: Suravee Suthikulpanit <suravee.suthikulpanit@amd.com>
> This looks much better, but I have a few comments still:
>
>> +static struct page_info *
>> +_get_vmcb_page(struct domain *d, uint64_t vmcbaddr)
> Can you give this a name that makes it clearer that it's for nested
> VMCBs and not part of the handling of 'real' VMCBs?  Also, please drop
> the leading underscore.

What about "get_nvmcb_page"?

>
>> +    {
>> +        gdprintk(XENLOG_ERR,
>> +            "VMLOAD: mapping vmcb L1-GPA to MPA failed, injecting #UD\n");
>> +        ret = TRAP_invalid_op;
> The documentation for VMLOAD suggests TRAP_gp_fault for this case.

OK. I have also checked the other exceptions injected in
svm_vmexit_do_vmsave and svm_vmexit_do_vmload, and the following should
probably also be changed to #GP as well:

    if (!nestedsvm_vmcb_map(v, vmcbaddr)) {
        gdprintk(XENLOG_ERR,
                 "VMSAVE: mapping vmcb failed, injecting #UD\n");
        ret = TRAP_invalid_op;
        goto inject;
    }

>
>> +        goto inject;
>> +    }
>> +
>> +    svm_vmload_pa(page_to_mfn(page) << PAGE_SHIFT);
> Please use page_to_maddr() for this.

Ah... That's what I needed. Thanks.

Suravee
Tim Deegan
2013-Jul-29 20:04 UTC
Re: [PATCH 1/1 V4] x86/AMD: Fix nested svm crash due to assertion in __virt_to_maddr
At 11:27 -0500 on 29 Jul (1375097276), Suravee Suthikulanit wrote:
> On 7/29/2013 5:43 AM, Tim Deegan wrote:
>> Hi,
>>
>> At 16:46 -0500 on 26 Jul (1374857167), suravee.suthikulpanit@amd.com wrote:
>>> From: Suravee Suthikulpanit <suravee.suthikulpanit@amd.com>
>>>
>>> Fix assertion in __virt_to_maddr when starting nested SVM guest
>>> in debug mode. Investigation has shown that svm_vmsave/svm_vmload
>>> make use of __pa() with invalid address.
>>>
>>> Signed-off-by: Suravee Suthikulpanit <suravee.suthikulpanit@amd.com>
>> This looks much better, but I have a few comments still:
>>
>>> +static struct page_info *
>>> +_get_vmcb_page(struct domain *d, uint64_t vmcbaddr)
>> Can you give this a name that makes it clearer that it's for nested
>> VMCBs and not part of the handling of 'real' VMCBs?  Also, please drop
>> the leading underscore.
> What about "get_nvmcb_page"?

Yes, that would be good.

>>> +    {
>>> +        gdprintk(XENLOG_ERR,
>>> +            "VMLOAD: mapping vmcb L1-GPA to MPA failed, injecting #UD\n");
>>> +        ret = TRAP_invalid_op;
>> The documentation for VMLOAD suggests TRAP_gp_fault for this case.
> OK. I have also checked the other exceptions injected in
> svm_vmexit_do_vmsave and svm_vmexit_do_vmload, and the following should
> probably also be changed to #GP as well:
>
>     if (!nestedsvm_vmcb_map(v, vmcbaddr)) {
>         gdprintk(XENLOG_ERR,
>                  "VMSAVE: mapping vmcb failed, injecting #UD\n");
>         ret = TRAP_invalid_op;
>         goto inject;
>     }

Yes, that sounds right.

Cheers,

Tim.
Egger, Christoph
2013-Jul-30 08:05 UTC
Re: [PATCH 1/1 V4] x86/AMD: Fix nested svm crash due to assertion in __virt_to_maddr
On 29.07.13 18:27, Suravee Suthikulanit wrote:
> On 7/29/2013 5:43 AM, Tim Deegan wrote:
>> Hi,
>>
>> At 16:46 -0500 on 26 Jul (1374857167), suravee.suthikulpanit@amd.com
>> wrote:
>>> From: Suravee Suthikulpanit <suravee.suthikulpanit@amd.com>
>>>
>>> Fix assertion in __virt_to_maddr when starting nested SVM guest
>>> in debug mode. Investigation has shown that svm_vmsave/svm_vmload
>>> make use of __pa() with invalid address.
>>>
>>> Signed-off-by: Suravee Suthikulpanit <suravee.suthikulpanit@amd.com>
>> This looks much better, but I have a few comments still:

Indeed.

>>
>>> +static struct page_info *
>>> +_get_vmcb_page(struct domain *d, uint64_t vmcbaddr)
>> Can you give this a name that makes it clearer that it's for nested
>> VMCBs and not part of the handling of 'real' VMCBs?  Also, please drop
>> the leading underscore.
> What about "get_nvmcb_page"?
>

That's good. If you want to follow the naming scheme I suggest
nsvm_get_nvmcb_page().

Christoph
Egger, Christoph
2013-Jul-30 08:13 UTC
Re: [PATCH 1/1 V4] x86/AMD: Fix nested svm crash due to assertion in __virt_to_maddr
On 29.07.13 22:04, Tim Deegan wrote:
> At 11:27 -0500 on 29 Jul (1375097276), Suravee Suthikulanit wrote:
>> On 7/29/2013 5:43 AM, Tim Deegan wrote:
>>> Hi,
>>>
>>> At 16:46 -0500 on 26 Jul (1374857167), suravee.suthikulpanit@amd.com wrote:
>>>> From: Suravee Suthikulpanit <suravee.suthikulpanit@amd.com>
>>>>
>>>> Fix assertion in __virt_to_maddr when starting nested SVM guest
>>>> in debug mode. Investigation has shown that svm_vmsave/svm_vmload
>>>> make use of __pa() with invalid address.
>>>>
>>>> Signed-off-by: Suravee Suthikulpanit <suravee.suthikulpanit@amd.com>
>>> This looks much better, but I have a few comments still:
>>>
>>>> +static struct page_info *
>>>> +_get_vmcb_page(struct domain *d, uint64_t vmcbaddr)
>>> Can you give this a name that makes it clearer that it's for nested
>>> VMCBs and not part of the handling of 'real' VMCBs?  Also, please drop
>>> the leading underscore.
>> What about "get_nvmcb_page"?
>
> Yes, that would be good.
>
>>>> +    {
>>>> +        gdprintk(XENLOG_ERR,
>>>> +            "VMLOAD: mapping vmcb L1-GPA to MPA failed, injecting #UD\n");
>>>> +        ret = TRAP_invalid_op;
>>> The documentation for VMLOAD suggests TRAP_gp_fault for this case.
>> OK. I have also checked the other exceptions injected in
>> svm_vmexit_do_vmsave and svm_vmexit_do_vmload, and the following should
>> probably also be changed to #GP as well:
>>
>>     if (!nestedsvm_vmcb_map(v, vmcbaddr)) {
>>         gdprintk(XENLOG_ERR,
>>                  "VMSAVE: mapping vmcb failed, injecting #UD\n");
>>         ret = TRAP_invalid_op;
>>         goto inject;
>>     }
>
> Yes, that sounds right.

Wait, the documentation
(http://support.amd.com/us/Processor_TechDocs/24593_APM_v2.pdf, page 470)
says:

VMLOAD and VMSAVE are available only at CPL 0 (#GP otherwise), and in
protected mode with SVM enabled in EFER.SVME (#UD otherwise).

Check the code path if EFER.SVME is guaranteed to be set. If it is not,
#UD is correct.

Christoph
Suravee Suthikulanit
2013-Jul-31 08:46 UTC
Re: [PATCH 1/1 V4] x86/AMD: Fix nested svm crash due to assertion in __virt_to_maddr
On 7/30/2013 3:05 AM, Egger, Christoph wrote:
> On 29.07.13 18:27, Suravee Suthikulanit wrote:
>> On 7/29/2013 5:43 AM, Tim Deegan wrote:
>>> Hi,
>>>
>>> At 16:46 -0500 on 26 Jul (1374857167), suravee.suthikulpanit@amd.com
>>> wrote:
>>>> From: Suravee Suthikulpanit <suravee.suthikulpanit@amd.com>
>>>> +static struct page_info *
>>>> +_get_vmcb_page(struct domain *d, uint64_t vmcbaddr)
>>> Can you give this a name that makes it clearer that it's for nested
>>> VMCBs and not part of the handling of 'real' VMCBs?  Also, please drop
>>> the leading underscore.
>> What about "get_nvmcb_page"?
>>
> That's good. If you want to follow the naming scheme I suggest
> nsvm_get_nvmcb_page().

Thanks, I will rename it to this.

Suravee

>
> Christoph
>
Suravee Suthikulanit
2013-Jul-31 15:52 UTC
Re: [PATCH 1/1 V4] x86/AMD: Fix nested svm crash due to assertion in __virt_to_maddr
On 7/30/2013 3:13 AM, Egger, Christoph wrote:
> On 29.07.13 22:04, Tim Deegan wrote:
>> At 11:27 -0500 on 29 Jul (1375097276), Suravee Suthikulanit wrote:
>>> On 7/29/2013 5:43 AM, Tim Deegan wrote:
>>>> Hi,
>>>>
>>>> At 16:46 -0500 on 26 Jul (1374857167), suravee.suthikulpanit@amd.com wrote:
>>>>> From: Suravee Suthikulpanit <suravee.suthikulpanit@amd.com>
>>>>>
>>>>> Fix assertion in __virt_to_maddr when starting nested SVM guest
>>>>> in debug mode. Investigation has shown that svm_vmsave/svm_vmload
>>>>> make use of __pa() with invalid address.
>>>>>
>>>>> Signed-off-by: Suravee Suthikulpanit <suravee.suthikulpanit@amd.com>
>>>> This looks much better, but I have a few comments still:
>>>>
>>>>> +static struct page_info *
>>>>> +_get_vmcb_page(struct domain *d, uint64_t vmcbaddr)
>>>> Can you give this a name that makes it clearer that it's for nested
>>>> VMCBs and not part of the handling of 'real' VMCBs?  Also, please drop
>>>> the leading underscore.
>>> What about "get_nvmcb_page"?
>> Yes, that would be good.
>>
>>>>> +    {
>>>>> +        gdprintk(XENLOG_ERR,
>>>>> +            "VMLOAD: mapping vmcb L1-GPA to MPA failed, injecting #UD\n");
>>>>> +        ret = TRAP_invalid_op;
>>>> The documentation for VMLOAD suggests TRAP_gp_fault for this case.
>>> OK. I have also checked the other exceptions injected in
>>> svm_vmexit_do_vmsave and svm_vmexit_do_vmload, and the following should
>>> probably also be changed to #GP as well:
>>>
>>>     if (!nestedsvm_vmcb_map(v, vmcbaddr)) {
>>>         gdprintk(XENLOG_ERR,
>>>                  "VMSAVE: mapping vmcb failed, injecting #UD\n");
>>>         ret = TRAP_invalid_op;
>>>         goto inject;
>>>     }
>> Yes, that sounds right.
> Wait, the documentation
> (http://support.amd.com/us/Processor_TechDocs/24593_APM_v2.pdf, page 470)
> says:
>
> VMLOAD and VMSAVE are available only at CPL 0 (#GP otherwise), and in
> protected mode with SVM enabled in EFER.SVME (#UD otherwise).
>
> Check the code path if EFER.SVME is guaranteed to be set. If it is not,
> #UD is correct.
>
> Christoph

I also found more detail documented in the APM v3
(http://support.amd.com/us/Processor_TechDocs/24594_APM_v3.pdf):

    // This instruction can only be executed in protected mode with SVM enabled
    IF ((MSR_EFER.SVME == 0) || (!PROTECTED_MODE))
        EXCEPTION [#UD]

    // This instruction is only allowed at CPL 0
    IF (CPL != 0)
        EXCEPTION [#GP]

    IF (rAX contains an unsupported system-physical address)
        EXCEPTION [#GP]

In this case, in the functions svm_vmexit_do_vmload and svm_vmexit_do_vmsave,
EFER.SVME should already be enabled if nestedhvm_enabled(v->domain) is true.
I could also add a check that the L1 guest has enabled EFER.SVME (i.e. using
the hvm_svm_enabled(v) macro).

Also, nestedsvm_vmcb_map() is trying to access the VMCB at a given address,
so I also think it should inject #GP when it fails.

Suravee
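To make the resulting ordering concrete, a rough sketch of the checks as
discussed above (illustrative only, not the final committed code; it assumes
hvm_svm_enabled(v) tests that the L1 guest has set EFER.SVME):

    /* Sketch of the #UD/#GP split for the VMLOAD/VMSAVE intercepts. */
    if ( !nestedhvm_enabled(v->domain) || !hvm_svm_enabled(v) )
    {
        ret = TRAP_invalid_op;   /* SVM not enabled for the guest: #UD */
        goto inject;
    }

    if ( !nestedsvm_vmcb_map(v, vmcbaddr) )
    {
        gdprintk(XENLOG_ERR, "VMSAVE: mapping vmcb failed, injecting #GP\n");
        ret = TRAP_gp_fault;     /* bad/unsupported address: #GP */
        goto inject;
    }

This mirrors the APM pseudocode above: the missing-SVME/protected-mode case
maps to #UD, while CPL and unsupported-address failures map to #GP.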