<suravee.suthikulpanit@amd.com>
2013-Aug-09 15:09 UTC
[PATCH 1/2 V6] x86/AMD: Fix nested svm crash due to assertion in __virt_to_maddr
From: Suravee Suthikulpanit <suravee.suthikulpanit@amd.com>
Fix assertion in __virt_to_maddr when starting nested SVM guest
in debug mode. Investigation has shown that svm_vmsave/svm_vmload
make use of __pa() with invalid address.
Signed-off-by: Suravee Suthikulpanit <suravee.suthikulpanit@amd.com>
Reviewed-by: Tim Deegan <tim@xen.org>
---
Changes from V5:
- Clean up and separate the unrelated code into another patch.
xen/arch/x86/hvm/svm/svm.c | 58 ++++++++++++++++++++++++++++++-------
xen/include/asm-x86/hvm/svm/svm.h | 11 ++++---
2 files changed, 54 insertions(+), 15 deletions(-)
diff --git a/xen/arch/x86/hvm/svm/svm.c b/xen/arch/x86/hvm/svm/svm.c
index 4cc4b15..b27f224 100644
--- a/xen/arch/x86/hvm/svm/svm.c
+++ b/xen/arch/x86/hvm/svm/svm.c
@@ -1795,6 +1795,32 @@ svm_vmexit_do_vmrun(struct cpu_user_regs *regs,
return;
}
+static struct page_info *
+nsvm_get_nvmcb_page(struct vcpu *v, uint64_t vmcbaddr)
+{
+ p2m_type_t p2mt;
+ struct page_info *page;
+ struct nestedvcpu *nv = &vcpu_nestedhvm(v);
+
+ if ( !nestedsvm_vmcb_map(v, vmcbaddr) )
+ return NULL;
+
+ /* Need to translate L1-GPA to MPA */
+ page = get_page_from_gfn(v->domain,
+ nv->nv_vvmcxaddr >> PAGE_SHIFT,
+ &p2mt, P2M_ALLOC | P2M_UNSHARE);
+ if ( !page )
+ return NULL;
+
+ if ( !p2m_is_ram(p2mt) || p2m_is_readonly(p2mt) )
+ {
+ put_page(page);
+ return NULL;
+ }
+
+ return page;
+}
+
static void
svm_vmexit_do_vmload(struct vmcb_struct *vmcb,
struct cpu_user_regs *regs,
@@ -1802,24 +1828,30 @@ svm_vmexit_do_vmload(struct vmcb_struct *vmcb,
{
int ret;
unsigned int inst_len;
- struct nestedvcpu *nv = &vcpu_nestedhvm(v);
+ struct page_info *page;
if ( (inst_len = __get_instruction_length(v, INSTR_VMLOAD)) == 0 )
return;
- if (!nestedhvm_enabled(v->domain)) {
+ if ( !nestedhvm_enabled(v->domain) )
+ {
gdprintk(XENLOG_ERR, "VMLOAD: nestedhvm disabled, injecting #UD\n");
ret = TRAP_invalid_op;
goto inject;
}
- if (!nestedsvm_vmcb_map(v, vmcbaddr)) {
- gdprintk(XENLOG_ERR, "VMLOAD: mapping vmcb failed, injecting #UD\n");
+ page = nsvm_get_nvmcb_page(v, vmcbaddr);
+ if ( !page )
+ {
+ gdprintk(XENLOG_ERR,
+ "VMLOAD: mapping failed, injecting #UD\n");
ret = TRAP_invalid_op;
goto inject;
}
- svm_vmload(nv->nv_vvmcx);
+ svm_vmload_pa(page_to_maddr(page));
+ put_page(page);
+
/* State in L1 VMCB is stale now */
v->arch.hvm_svm.vmcb_in_sync = 0;
@@ -1838,25 +1870,29 @@ svm_vmexit_do_vmsave(struct vmcb_struct *vmcb,
{
int ret;
unsigned int inst_len;
- struct nestedvcpu *nv = &vcpu_nestedhvm(v);
+ struct page_info *page;
if ( (inst_len = __get_instruction_length(v, INSTR_VMSAVE)) == 0 )
return;
- if (!nestedhvm_enabled(v->domain)) {
+ if ( !nestedhvm_enabled(v->domain) )
+ {
gdprintk(XENLOG_ERR, "VMSAVE: nestedhvm disabled, injecting #UD\n");
ret = TRAP_invalid_op;
goto inject;
}
- if (!nestedsvm_vmcb_map(v, vmcbaddr)) {
- gdprintk(XENLOG_ERR, "VMSAVE: mapping vmcb failed, injecting #UD\n");
+ page = nsvm_get_nvmcb_page(v, vmcbaddr);
+ if ( !page )
+ {
+ gdprintk(XENLOG_ERR,
+ "VMSAVE: mapping vmcb failed, injecting #UD\n");
ret = TRAP_invalid_op;
goto inject;
}
- svm_vmsave(nv->nv_vvmcx);
-
+ svm_vmsave_pa(page_to_maddr(page));
+ put_page(page);
__update_guest_eip(regs, inst_len);
return;
diff --git a/xen/include/asm-x86/hvm/svm/svm.h b/xen/include/asm-x86/hvm/svm/svm.h
index 64e7e25..1ffe6d6 100644
--- a/xen/include/asm-x86/hvm/svm/svm.h
+++ b/xen/include/asm-x86/hvm/svm/svm.h
@@ -41,18 +41,21 @@
#define SVM_REG_R14 (14)
#define SVM_REG_R15 (15)
-static inline void svm_vmload(void *vmcb)
+#define svm_vmload(x) svm_vmload_pa(__pa(x))
+#define svm_vmsave(x) svm_vmsave_pa(__pa(x))
+
+static inline void svm_vmload_pa(paddr_t vmcb)
{
asm volatile (
".byte 0x0f,0x01,0xda" /* vmload */
- : : "a" (__pa(vmcb)) : "memory" );
+ : : "a" (vmcb) : "memory" );
}
-static inline void svm_vmsave(void *vmcb)
+static inline void svm_vmsave_pa(paddr_t vmcb)
{
asm volatile (
".byte 0x0f,0x01,0xdb" /* vmsave */
- : : "a" (__pa(vmcb)) : "memory" );
+ : : "a" (vmcb) : "memory" );
}
static inline void svm_invlpga(unsigned long vaddr, uint32_t asid)
--
1.7.10.4
<suravee.suthikulpanit@amd.com>
2013-Aug-09 15:09 UTC
[PATCH 2/2] x86/AMD: Inject #GP instead of #UD when unable to map vmcb
From: Suravee Suthikulpanit <suravee.suthikulpanit@amd.com>
According to AMD Programmer's Manual vol2, vmrun, vmsave and vmload
should inject #GP instead of #UD when unable to access memory
location for vmcb. Also, the code should make sure that L1 guest
EFER.SVME is not zero. Otherwise, #UD should be injected.
Signed-off-by: Suravee Suthikulpanit <suravee.suthikulpanit@amd.com>
Reviewed-by: Tim Deegan <tim@xen.org>
---
xen/arch/x86/hvm/svm/svm.c | 22 ++++++++++++----------
1 file changed, 12 insertions(+), 10 deletions(-)
diff --git a/xen/arch/x86/hvm/svm/svm.c b/xen/arch/x86/hvm/svm/svm.c
index b27f224..e71ae08 100644
--- a/xen/arch/x86/hvm/svm/svm.c
+++ b/xen/arch/x86/hvm/svm/svm.c
@@ -1779,15 +1779,17 @@ static void
svm_vmexit_do_vmrun(struct cpu_user_regs *regs,
struct vcpu *v, uint64_t vmcbaddr)
{
- if (!nestedhvm_enabled(v->domain)) {
+ if ( !nestedhvm_enabled(v->domain) || !hvm_svm_enabled(v) )
+ {
gdprintk(XENLOG_ERR, "VMRUN: nestedhvm disabled, injecting #UD\n");
hvm_inject_hw_exception(TRAP_invalid_op, HVM_DELIVER_NO_ERROR_CODE);
return;
}
- if (!nestedsvm_vmcb_map(v, vmcbaddr)) {
- gdprintk(XENLOG_ERR, "VMRUN: mapping vmcb failed, injecting #UD\n");
- hvm_inject_hw_exception(TRAP_invalid_op, HVM_DELIVER_NO_ERROR_CODE);
+ if ( !nestedsvm_vmcb_map(v, vmcbaddr) )
+ {
+ gdprintk(XENLOG_ERR, "VMRUN: mapping vmcb failed, injecting #GP\n");
+ hvm_inject_hw_exception(TRAP_gp_fault, HVM_DELIVER_NO_ERROR_CODE);
return;
}
@@ -1833,7 +1835,7 @@ svm_vmexit_do_vmload(struct vmcb_struct *vmcb,
if ( (inst_len = __get_instruction_length(v, INSTR_VMLOAD)) == 0 )
return;
- if ( !nestedhvm_enabled(v->domain) )
+ if ( !nestedhvm_enabled(v->domain) || !hvm_svm_enabled(v) )
{
gdprintk(XENLOG_ERR, "VMLOAD: nestedhvm disabled, injecting #UD\n");
ret = TRAP_invalid_op;
@@ -1844,8 +1846,8 @@ svm_vmexit_do_vmload(struct vmcb_struct *vmcb,
if ( !page )
{
gdprintk(XENLOG_ERR,
- "VMLOAD: mapping failed, injecting #UD\n");
- ret = TRAP_invalid_op;
+ "VMLOAD: mapping failed, injecting #GP\n");
+ ret = TRAP_gp_fault;
goto inject;
}
@@ -1875,7 +1877,7 @@ svm_vmexit_do_vmsave(struct vmcb_struct *vmcb,
if ( (inst_len = __get_instruction_length(v, INSTR_VMSAVE)) == 0 )
return;
- if ( !nestedhvm_enabled(v->domain) )
+ if ( !nestedhvm_enabled(v->domain) || !hvm_svm_enabled(v) )
{
gdprintk(XENLOG_ERR, "VMSAVE: nestedhvm disabled, injecting #UD\n");
ret = TRAP_invalid_op;
@@ -1886,8 +1888,8 @@ svm_vmexit_do_vmsave(struct vmcb_struct *vmcb,
if ( !page )
{
gdprintk(XENLOG_ERR,
- "VMSAVE: mapping vmcb failed, injecting #UD\n");
- ret = TRAP_invalid_op;
+ "VMSAVE: mapping vmcb failed, injecting #GP\n");
+ ret = TRAP_gp_fault;
goto inject;
}
--
1.7.10.4
Suravee Suthikulpanit
2013-Aug-09 15:22 UTC
Re: [PATCH 2/2] x86/AMD: Inject #GP instead of #UD when unable to map vmcb
Please ignore this one. I forgot to rename the macro used for checking the efer.svme bit as Jan has suggested. Suravee On 8/9/2013 10:09 AM, suravee.suthikulpanit@amd.com wrote:> From: Suravee Suthikulpanit <suravee.suthikulpanit@amd.com> > > According to AMD Programmer''s Manual vol2, vmrun, vmsave and vmload > should inject #GP instead of #UD when unable to access memory > location for vmcb. Also, the code should make sure that L1 guest > EFER.SVME is not zero. Otherwise, #UD should be injected. > > Signed-off-by: Suravee Suthikulpanit <suravee.suthikulpanit@amd.com> > Reviewed-by: Tim Deegan <tim@xen.org> > --- > xen/arch/x86/hvm/svm/svm.c | 22 ++++++++++++---------- > 1 file changed, 12 insertions(+), 10 deletions(-) > > diff --git a/xen/arch/x86/hvm/svm/svm.c b/xen/arch/x86/hvm/svm/svm.c > index b27f224..e71ae08 100644 > --- a/xen/arch/x86/hvm/svm/svm.c > +++ b/xen/arch/x86/hvm/svm/svm.c > @@ -1779,15 +1779,17 @@ static void > svm_vmexit_do_vmrun(struct cpu_user_regs *regs, > struct vcpu *v, uint64_t vmcbaddr) > { > - if (!nestedhvm_enabled(v->domain)) { > + if ( !nestedhvm_enabled(v->domain) || !hvm_svm_enabled(v) ) > + { > gdprintk(XENLOG_ERR, "VMRUN: nestedhvm disabled, injecting #UD\n"); > hvm_inject_hw_exception(TRAP_invalid_op, HVM_DELIVER_NO_ERROR_CODE); > return; > } > > - if (!nestedsvm_vmcb_map(v, vmcbaddr)) { > - gdprintk(XENLOG_ERR, "VMRUN: mapping vmcb failed, injecting #UD\n"); > - hvm_inject_hw_exception(TRAP_invalid_op, HVM_DELIVER_NO_ERROR_CODE); > + if ( !nestedsvm_vmcb_map(v, vmcbaddr) ) > + { > + gdprintk(XENLOG_ERR, "VMRUN: mapping vmcb failed, injecting #GP\n"); > + hvm_inject_hw_exception(TRAP_gp_fault, HVM_DELIVER_NO_ERROR_CODE); > return; > } > > @@ -1833,7 +1835,7 @@ svm_vmexit_do_vmload(struct vmcb_struct *vmcb, > if ( (inst_len = __get_instruction_length(v, INSTR_VMLOAD)) == 0 ) > return; > > - if ( !nestedhvm_enabled(v->domain) ) > + if ( !nestedhvm_enabled(v->domain) || !hvm_svm_enabled(v) ) > { > gdprintk(XENLOG_ERR, "VMLOAD: 
nestedhvm disabled, injecting #UD\n"); > ret = TRAP_invalid_op; > @@ -1844,8 +1846,8 @@ svm_vmexit_do_vmload(struct vmcb_struct *vmcb, > if ( !page ) > { > gdprintk(XENLOG_ERR, > - "VMLOAD: mapping failed, injecting #UD\n"); > - ret = TRAP_invalid_op; > + "VMLOAD: mapping failed, injecting #GP\n"); > + ret = TRAP_gp_fault; > goto inject; > } > > @@ -1875,7 +1877,7 @@ svm_vmexit_do_vmsave(struct vmcb_struct *vmcb, > if ( (inst_len = __get_instruction_length(v, INSTR_VMSAVE)) == 0 ) > return; > > - if ( !nestedhvm_enabled(v->domain) ) > + if ( !nestedhvm_enabled(v->domain) || !hvm_svm_enabled(v) ) > { > gdprintk(XENLOG_ERR, "VMSAVE: nestedhvm disabled, injecting #UD\n"); > ret = TRAP_invalid_op; > @@ -1886,8 +1888,8 @@ svm_vmexit_do_vmsave(struct vmcb_struct *vmcb, > if ( !page ) > { > gdprintk(XENLOG_ERR, > - "VMSAVE: mapping vmcb failed, injecting #UD\n"); > - ret = TRAP_invalid_op; > + "VMSAVE: mapping vmcb failed, injecting #GP\n"); > + ret = TRAP_gp_fault; > goto inject; > } >
<suravee.suthikulpanit@amd.com>
2013-Aug-09 15:24 UTC
[PATCH 1/2 V6] x86/AMD: Fix nested svm crash due to assertion in __virt_to_maddr
From: Suravee Suthikulpanit <suravee.suthikulpanit@amd.com>
Fix assertion in __virt_to_maddr when starting nested SVM guest
in debug mode. Investigation has shown that svm_vmsave/svm_vmload
make use of __pa() with invalid address.
Signed-off-by: Suravee Suthikulpanit <suravee.suthikulpanit@amd.com>
Reviewed-by: Tim Deegan <tim@xen.org>
---
Changes from V5:
- Clean up and separate the unrelated code into another patch.
xen/arch/x86/hvm/svm/svm.c | 58 ++++++++++++++++++++++++++++++-------
xen/include/asm-x86/hvm/svm/svm.h | 11 ++++---
2 files changed, 54 insertions(+), 15 deletions(-)
diff --git a/xen/arch/x86/hvm/svm/svm.c b/xen/arch/x86/hvm/svm/svm.c
index 4cc4b15..b27f224 100644
--- a/xen/arch/x86/hvm/svm/svm.c
+++ b/xen/arch/x86/hvm/svm/svm.c
@@ -1795,6 +1795,32 @@ svm_vmexit_do_vmrun(struct cpu_user_regs *regs,
return;
}
+static struct page_info *
+nsvm_get_nvmcb_page(struct vcpu *v, uint64_t vmcbaddr)
+{
+ p2m_type_t p2mt;
+ struct page_info *page;
+ struct nestedvcpu *nv = &vcpu_nestedhvm(v);
+
+ if ( !nestedsvm_vmcb_map(v, vmcbaddr) )
+ return NULL;
+
+ /* Need to translate L1-GPA to MPA */
+ page = get_page_from_gfn(v->domain,
+ nv->nv_vvmcxaddr >> PAGE_SHIFT,
+ &p2mt, P2M_ALLOC | P2M_UNSHARE);
+ if ( !page )
+ return NULL;
+
+ if ( !p2m_is_ram(p2mt) || p2m_is_readonly(p2mt) )
+ {
+ put_page(page);
+ return NULL;
+ }
+
+ return page;
+}
+
static void
svm_vmexit_do_vmload(struct vmcb_struct *vmcb,
struct cpu_user_regs *regs,
@@ -1802,24 +1828,30 @@ svm_vmexit_do_vmload(struct vmcb_struct *vmcb,
{
int ret;
unsigned int inst_len;
- struct nestedvcpu *nv = &vcpu_nestedhvm(v);
+ struct page_info *page;
if ( (inst_len = __get_instruction_length(v, INSTR_VMLOAD)) == 0 )
return;
- if (!nestedhvm_enabled(v->domain)) {
+ if ( !nestedhvm_enabled(v->domain) )
+ {
gdprintk(XENLOG_ERR, "VMLOAD: nestedhvm disabled, injecting #UD\n");
ret = TRAP_invalid_op;
goto inject;
}
- if (!nestedsvm_vmcb_map(v, vmcbaddr)) {
- gdprintk(XENLOG_ERR, "VMLOAD: mapping vmcb failed, injecting #UD\n");
+ page = nsvm_get_nvmcb_page(v, vmcbaddr);
+ if ( !page )
+ {
+ gdprintk(XENLOG_ERR,
+ "VMLOAD: mapping failed, injecting #UD\n");
ret = TRAP_invalid_op;
goto inject;
}
- svm_vmload(nv->nv_vvmcx);
+ svm_vmload_pa(page_to_maddr(page));
+ put_page(page);
+
/* State in L1 VMCB is stale now */
v->arch.hvm_svm.vmcb_in_sync = 0;
@@ -1838,25 +1870,29 @@ svm_vmexit_do_vmsave(struct vmcb_struct *vmcb,
{
int ret;
unsigned int inst_len;
- struct nestedvcpu *nv = &vcpu_nestedhvm(v);
+ struct page_info *page;
if ( (inst_len = __get_instruction_length(v, INSTR_VMSAVE)) == 0 )
return;
- if (!nestedhvm_enabled(v->domain)) {
+ if ( !nestedhvm_enabled(v->domain) )
+ {
gdprintk(XENLOG_ERR, "VMSAVE: nestedhvm disabled, injecting #UD\n");
ret = TRAP_invalid_op;
goto inject;
}
- if (!nestedsvm_vmcb_map(v, vmcbaddr)) {
- gdprintk(XENLOG_ERR, "VMSAVE: mapping vmcb failed, injecting #UD\n");
+ page = nsvm_get_nvmcb_page(v, vmcbaddr);
+ if ( !page )
+ {
+ gdprintk(XENLOG_ERR,
+ "VMSAVE: mapping vmcb failed, injecting #UD\n");
ret = TRAP_invalid_op;
goto inject;
}
- svm_vmsave(nv->nv_vvmcx);
-
+ svm_vmsave_pa(page_to_maddr(page));
+ put_page(page);
__update_guest_eip(regs, inst_len);
return;
diff --git a/xen/include/asm-x86/hvm/svm/svm.h b/xen/include/asm-x86/hvm/svm/svm.h
index 64e7e25..1ffe6d6 100644
--- a/xen/include/asm-x86/hvm/svm/svm.h
+++ b/xen/include/asm-x86/hvm/svm/svm.h
@@ -41,18 +41,21 @@
#define SVM_REG_R14 (14)
#define SVM_REG_R15 (15)
-static inline void svm_vmload(void *vmcb)
+#define svm_vmload(x) svm_vmload_pa(__pa(x))
+#define svm_vmsave(x) svm_vmsave_pa(__pa(x))
+
+static inline void svm_vmload_pa(paddr_t vmcb)
{
asm volatile (
".byte 0x0f,0x01,0xda" /* vmload */
- : : "a" (__pa(vmcb)) : "memory" );
+ : : "a" (vmcb) : "memory" );
}
-static inline void svm_vmsave(void *vmcb)
+static inline void svm_vmsave_pa(paddr_t vmcb)
{
asm volatile (
".byte 0x0f,0x01,0xdb" /* vmsave */
- : : "a" (__pa(vmcb)) : "memory" );
+ : : "a" (vmcb) : "memory" );
}
static inline void svm_invlpga(unsigned long vaddr, uint32_t asid)
--
1.7.10.4
<suravee.suthikulpanit@amd.com>
2013-Aug-09 15:24 UTC
[PATCH 2/2 V2] x86/AMD: Inject #GP instead of #UD when unable to map vmcb
From: Suravee Suthikulpanit <suravee.suthikulpanit@amd.com>
According to AMD Programmer's Manual vol2, vmrun, vmsave and vmload
should inject #GP instead of #UD when unable to access memory
location for vmcb. Also, the code should make sure that L1 guest
EFER.SVME is not zero. Otherwise, #UD should be injected.
Signed-off-by: Suravee Suthikulpanit <suravee.suthikulpanit@amd.com>
Reviewed-by: Tim Deegan <tim@xen.org>
---
Changes from V1:
- rename hvm_svm_enabled macro to nsvm_efer_svm_enabled (as Jan commented)
xen/arch/x86/hvm/svm/svm.c | 22 ++++++++++++----------
xen/include/asm-x86/hvm/svm/nestedsvm.h | 2 +-
2 files changed, 13 insertions(+), 11 deletions(-)
diff --git a/xen/arch/x86/hvm/svm/svm.c b/xen/arch/x86/hvm/svm/svm.c
index b27f224..214f27c 100644
--- a/xen/arch/x86/hvm/svm/svm.c
+++ b/xen/arch/x86/hvm/svm/svm.c
@@ -1779,15 +1779,17 @@ static void
svm_vmexit_do_vmrun(struct cpu_user_regs *regs,
struct vcpu *v, uint64_t vmcbaddr)
{
- if (!nestedhvm_enabled(v->domain)) {
+ if ( !nestedhvm_enabled(v->domain) || !nsvm_efer_svm_enabled(v) )
+ {
gdprintk(XENLOG_ERR, "VMRUN: nestedhvm disabled, injecting #UD\n");
hvm_inject_hw_exception(TRAP_invalid_op, HVM_DELIVER_NO_ERROR_CODE);
return;
}
- if (!nestedsvm_vmcb_map(v, vmcbaddr)) {
- gdprintk(XENLOG_ERR, "VMRUN: mapping vmcb failed, injecting #UD\n");
- hvm_inject_hw_exception(TRAP_invalid_op, HVM_DELIVER_NO_ERROR_CODE);
+ if ( !nestedsvm_vmcb_map(v, vmcbaddr) )
+ {
+ gdprintk(XENLOG_ERR, "VMRUN: mapping vmcb failed, injecting #GP\n");
+ hvm_inject_hw_exception(TRAP_gp_fault, HVM_DELIVER_NO_ERROR_CODE);
return;
}
@@ -1833,7 +1835,7 @@ svm_vmexit_do_vmload(struct vmcb_struct *vmcb,
if ( (inst_len = __get_instruction_length(v, INSTR_VMLOAD)) == 0 )
return;
- if ( !nestedhvm_enabled(v->domain) )
+ if ( !nestedhvm_enabled(v->domain) || !nsvm_efer_svm_enabled(v) )
{
gdprintk(XENLOG_ERR, "VMLOAD: nestedhvm disabled, injecting #UD\n");
ret = TRAP_invalid_op;
@@ -1844,8 +1846,8 @@ svm_vmexit_do_vmload(struct vmcb_struct *vmcb,
if ( !page )
{
gdprintk(XENLOG_ERR,
- "VMLOAD: mapping failed, injecting #UD\n");
- ret = TRAP_invalid_op;
+ "VMLOAD: mapping failed, injecting #GP\n");
+ ret = TRAP_gp_fault;
goto inject;
}
@@ -1875,7 +1877,7 @@ svm_vmexit_do_vmsave(struct vmcb_struct *vmcb,
if ( (inst_len = __get_instruction_length(v, INSTR_VMSAVE)) == 0 )
return;
- if ( !nestedhvm_enabled(v->domain) )
+ if ( !nestedhvm_enabled(v->domain) || !nsvm_efer_svm_enabled(v) )
{
gdprintk(XENLOG_ERR, "VMSAVE: nestedhvm disabled, injecting #UD\n");
ret = TRAP_invalid_op;
@@ -1886,8 +1888,8 @@ svm_vmexit_do_vmsave(struct vmcb_struct *vmcb,
if ( !page )
{
gdprintk(XENLOG_ERR,
- "VMSAVE: mapping vmcb failed, injecting #UD\n");
- ret = TRAP_invalid_op;
+ "VMSAVE: mapping vmcb failed, injecting #GP\n");
+ ret = TRAP_gp_fault;
goto inject;
}
diff --git a/xen/include/asm-x86/hvm/svm/nestedsvm.h b/xen/include/asm-x86/hvm/svm/nestedsvm.h
index 0bc663a..f88b1bd 100644
--- a/xen/include/asm-x86/hvm/svm/nestedsvm.h
+++ b/xen/include/asm-x86/hvm/svm/nestedsvm.h
@@ -94,7 +94,7 @@ struct nestedsvm {
#define vcpu_nestedsvm(v) (vcpu_nestedhvm(v).u.nsvm)
/* True when l1 guest enabled SVM in EFER */
-#define hvm_svm_enabled(v) \
+#define nsvm_efer_svm_enabled(v) \
(!!((v)->arch.hvm_vcpu.guest_efer & EFER_SVME))
int nestedsvm_vmcb_map(struct vcpu *v, uint64_t vmcbaddr);
--
1.7.10.4