This patch contains the following VMX patches:
- ioemu: Update the address when doing a stosb/stosw/stosl. Without this patch
stosb writes req->count times at the same location req->addr. Not
exactly
the intended semantics.
- xc_ptrace.c: Use the page_array to refer to partition memory. This allows
you to use gdbserver on a VMX partition (and presumably partitions that use
shadow page tables).
- dom0_ops.c: Preserve ECF_VMX_GUEST flag for gdbserver. Without it you
cannot (re)set the VMX domain state.
- vmx.c: Added support for lmsw. Unmodified FreeBSD uses this when running
inside a VMX partition.
Signed-Off-By: Leendert van Doorn <leendert@watson.ibm.com>
===== tools/ioemu/iodev/cpu.cc 1.8 vs edited ====---
1.8/tools/ioemu/iodev/cpu.cc 2005-03-31 13:55:11 -05:00
+++ edited/tools/ioemu/iodev/cpu.cc 2005-04-24 03:57:13 -04:00
@@ -128,15 +128,13 @@
if (!req->pdata_valid) {
if(req->dir == IOREQ_READ){//read
//BX_INFO(("mmio[value]: <READ> addr:%llx, value:%llx, size:
%llx, count: %llx\n", req->addr, req->u.data, req->size,
req->count));
-
for (i = 0; i < req->count; i++) {
- BX_MEM_READ_PHYSICAL(req->addr, req->size, &req->u.data);
+ BX_MEM_READ_PHYSICAL(req->addr + (sign * i * req->size),
req->size, &req->u.data);
}
} else if(req->dir == IOREQ_WRITE) {//write
//BX_INFO(("mmio[value]: <WRITE> addr:%llx, value:%llx, size:
%llx, count: %llx\n", req->addr, req->u.data, req->size,
req->count));
-
for (i = 0; i < req->count; i++) {
- BX_MEM_WRITE_PHYSICAL(req->addr, req->size, &req->u.data);
+ BX_MEM_WRITE_PHYSICAL(req->addr + (sign * i * req->size),
req->size, &req->u.data);
}
}
} else {
===== tools/libxc/xc_ptrace.c 1.7 vs edited ====--- 1.7/tools/libxc/xc_ptrace.c
2005-04-18 18:03:28 -04:00
+++ edited/tools/libxc/xc_ptrace.c 2005-04-22 12:56:09 -04:00
@@ -75,7 +75,7 @@
int retval = xc_domain_getfullinfo(xc_handle, domid, cpu, NULL,
&ctxt[cpu]); \
if (retval) \
goto error_out; \
- cr3[cpu] = ctxt[cpu].pt_base; \
+ cr3[cpu] = ctxt[cpu].pt_base; /* physical address */ \
regs_valid[cpu] = 1; \
} \
@@ -128,11 +128,12 @@
static int xc_handle;
+static long nr_pages = 0;
+unsigned long *page_array = NULL;
static int regs_valid[MAX_VIRT_CPUS];
static unsigned long cr3[MAX_VIRT_CPUS];
static full_execution_context_t ctxt[MAX_VIRT_CPUS];
-
/* --------------------- */
static void *
@@ -140,6 +141,7 @@
{
unsigned long pde, page;
unsigned long va = (unsigned long)guest_va;
+ long npgs = xc_get_tot_pages(xc_handle, domid);
static unsigned long cr3_phys[MAX_VIRT_CPUS];
static unsigned long *cr3_virt[MAX_VIRT_CPUS];
@@ -150,6 +152,21 @@
static int prev_perm[MAX_VIRT_CPUS];
+ if (nr_pages != npgs) {
+ if (nr_pages > 0)
+ free(page_array);
+ nr_pages = npgs;
+ if ((page_array = malloc(nr_pages * sizeof(unsigned long))) == NULL) {
+ printf("Could not allocate memory\n");
+ goto error_out;
+ }
+
+ if (xc_get_pfn_list(xc_handle, domid, page_array, nr_pages) != nr_pages) {
+ printf("Could not get the page frame list\n");
+ goto error_out;
+ }
+ }
+
FETCH_REGS(cpu);
if (cr3[cpu] != cr3_phys[cpu])
@@ -162,8 +179,9 @@
cr3_phys[cpu] >> PAGE_SHIFT)) == NULL)
goto error_out;
}
- if ((pde = cr3_virt[cpu][vtopdi(va)]) == 0)
+ if ((pde = cr3_virt[cpu][vtopdi(va)]) == 0) /* logical address */
goto error_out;
+ pde = page_array[pde >> PAGE_SHIFT] << PAGE_SHIFT;
if (pde != pde_phys[cpu])
{
pde_phys[cpu] = pde;
@@ -174,8 +192,9 @@
pde_phys[cpu] >> PAGE_SHIFT)) == NULL)
goto error_out;
}
- if ((page = pde_virt[cpu][vtopti(va)]) == 0)
+ if ((page = pde_virt[cpu][vtopti(va)]) == 0) /* logical address */
goto error_out;
+ page = page_array[page >> PAGE_SHIFT] << PAGE_SHIFT;
if (page != page_phys[cpu] || perm != prev_perm[cpu])
{
page_phys[cpu] = page;
@@ -330,6 +349,7 @@
perror("dom0 op failed");
goto error_out;
}
+ /* FALLTHROUGH */
case PTRACE_CONT:
case PTRACE_DETACH:
if (request != PTRACE_SINGLESTEP) {
===== xen/arch/x86/dom0_ops.c 1.29 vs ? (writable without lock!) ====---
1.29/xen/arch/x86/dom0_ops.c 2005-04-20 07:50:03 -04:00
+++ ?/xen/arch/x86/dom0_ops.c 2005-04-25 00:50:11 -04:00
@@ -404,6 +404,10 @@
c->flags |= ECF_I387_VALID;
if ( KERNEL_MODE(ed, &ed->arch.user_ctxt) )
c->flags |= ECF_IN_KERNEL;
+#ifdef CONFIG_VMX
+ if (VMX_DOMAIN(ed))
+ c->flags |= ECF_VMX_GUEST;
+#endif
memcpy(&c->fpu_ctxt,
&ed->arch.i387,
sizeof(ed->arch.i387));
===== xen/arch/x86/vmx.c 1.43 vs edited ====--- 1.43/xen/arch/x86/vmx.c
2005-04-21 16:53:42 -04:00
+++ edited/xen/arch/x86/vmx.c 2005-04-24 22:51:14 -04:00
@@ -640,6 +640,84 @@
return 0;
}
+static int vmx_set_cr0(unsigned long value)
+{
+ struct exec_domain *d = current;
+ unsigned long old_base_mfn, mfn;
+ unsigned long eip;
+
+ /*
+ * CR0: We don''t want to lose PE and PG.
+ */
+ __vmwrite(GUEST_CR0, (value | X86_CR0_PE | X86_CR0_PG));
+
+ if (value & (X86_CR0_PE | X86_CR0_PG) &&
+ !test_bit(VMX_CPU_STATE_PG_ENABLED,
&d->arch.arch_vmx.cpu_state)) {
+ /*
+ * Enable paging
+ */
+ set_bit(VMX_CPU_STATE_PG_ENABLED, &d->arch.arch_vmx.cpu_state);
+ /*
+ * The guest CR3 must be pointing to the guest physical.
+ */
+ if ( !VALID_MFN(mfn = phys_to_machine_mapping(
+ d->arch.arch_vmx.cpu_cr3 >> PAGE_SHIFT))
||
+ !get_page(pfn_to_page(mfn), d->domain) )
+ {
+ VMX_DBG_LOG(DBG_LEVEL_VMMU, "Invalid CR3 value = %lx",
+ d->arch.arch_vmx.cpu_cr3);
+ domain_crash_synchronous(); /* need to take a clean path */
+ }
+ old_base_mfn = pagetable_val(d->arch.guest_table) >>
PAGE_SHIFT;
+ if (old_base_mfn)
+ put_page(pfn_to_page(old_base_mfn));
+
+ /*
+ * Now arch.guest_table points to machine physical.
+ */
+ d->arch.guest_table = mk_pagetable(mfn << PAGE_SHIFT);
+ update_pagetables(d);
+
+ VMX_DBG_LOG(DBG_LEVEL_VMMU, "New arch.guest_table = %lx",
+ (unsigned long) (mfn << PAGE_SHIFT));
+
+ __vmwrite(GUEST_CR3, pagetable_val(d->arch.shadow_table));
+ /*
+ * arch->shadow_table should hold the next CR3 for shadow
+ */
+ VMX_DBG_LOG(DBG_LEVEL_VMMU, "Update CR3 value = %lx, mfn =
%lx",
+ d->arch.arch_vmx.cpu_cr3, mfn);
+ } else {
+ if ((value & X86_CR0_PE) == 0) {
+ __vmread(GUEST_EIP, &eip);
+ VMX_DBG_LOG(DBG_LEVEL_1,
+ "Disabling CR0.PE at %%eip 0x%lx", eip);
+ if (vmx_assist(d, VMX_ASSIST_INVOKE)) {
+ set_bit(VMX_CPU_STATE_ASSIST_ENABLED,
+ &d->arch.arch_vmx.cpu_state);
+ __vmread(GUEST_EIP, &eip);
+ VMX_DBG_LOG(DBG_LEVEL_1,
+ "Transfering control to vmxassist %%eip 0x%lx", eip);
+ return 0; /* do not update eip! */
+ }
+ } else if (test_bit(VMX_CPU_STATE_ASSIST_ENABLED,
+ &d->arch.arch_vmx.cpu_state)) {
+ __vmread(GUEST_EIP, &eip);
+ VMX_DBG_LOG(DBG_LEVEL_1,
+ "Enabling CR0.PE at %%eip 0x%lx", eip);
+ if (vmx_assist(d, VMX_ASSIST_RESTORE)) {
+ clear_bit(VMX_CPU_STATE_ASSIST_ENABLED,
+ &d->arch.arch_vmx.cpu_state);
+ __vmread(GUEST_EIP, &eip);
+ VMX_DBG_LOG(DBG_LEVEL_1,
+ "Restoring to %%eip 0x%lx", eip);
+ return 0; /* do not update eip! */
+ }
+ }
+ }
+ return 1;
+}
+
#define CASE_GET_REG(REG, reg) \
case REG_ ## REG: value = regs->reg; break
@@ -650,7 +728,6 @@
{
unsigned long value;
unsigned long old_cr;
- unsigned long eip;
struct exec_domain *d = current;
switch (gp) {
@@ -675,80 +752,8 @@
switch(cr) {
case 0:
{
- unsigned long old_base_mfn, mfn;
-
- /*
- * CR0:
- * We don''t want to lose PE and PG.
- */
- __vmwrite(GUEST_CR0, (value | X86_CR0_PE | X86_CR0_PG));
- __vmwrite(CR0_READ_SHADOW, value);
-
- if (value & (X86_CR0_PE | X86_CR0_PG) &&
- !test_bit(VMX_CPU_STATE_PG_ENABLED,
&d->arch.arch_vmx.cpu_state)) {
- /*
- * Enable paging
- */
- set_bit(VMX_CPU_STATE_PG_ENABLED,
&d->arch.arch_vmx.cpu_state);
- /*
- * The guest CR3 must be pointing to the guest physical.
- */
- if ( !VALID_MFN(mfn = phys_to_machine_mapping(
- d->arch.arch_vmx.cpu_cr3 >>
PAGE_SHIFT)) ||
- !get_page(pfn_to_page(mfn), d->domain) )
- {
- VMX_DBG_LOG(DBG_LEVEL_VMMU, "Invalid CR3 value =
%lx",
- d->arch.arch_vmx.cpu_cr3);
- domain_crash_synchronous(); /* need to take a clean path */
- }
- old_base_mfn = pagetable_val(d->arch.guest_table) >>
PAGE_SHIFT;
- if ( old_base_mfn )
- put_page(pfn_to_page(old_base_mfn));
-
- /*
- * Now arch.guest_table points to machine physical.
- */
- d->arch.guest_table = mk_pagetable(mfn << PAGE_SHIFT);
- update_pagetables(d);
-
- VMX_DBG_LOG(DBG_LEVEL_VMMU, "New arch.guest_table = %lx",
- (unsigned long) (mfn << PAGE_SHIFT));
-
- __vmwrite(GUEST_CR3, pagetable_val(d->arch.shadow_table));
- /*
- * arch->shadow_table should hold the next CR3 for shadow
- */
- VMX_DBG_LOG(DBG_LEVEL_VMMU, "Update CR3 value = %lx, mfn =
%lx",
- d->arch.arch_vmx.cpu_cr3, mfn);
- } else {
- if ((value & X86_CR0_PE) == 0) {
- __vmread(GUEST_EIP, &eip);
- VMX_DBG_LOG(DBG_LEVEL_1,
- "Disabling CR0.PE at %%eip 0x%lx", eip);
- if (vmx_assist(d, VMX_ASSIST_INVOKE)) {
- set_bit(VMX_CPU_STATE_ASSIST_ENABLED,
- &d->arch.arch_vmx.cpu_state);
- __vmread(GUEST_EIP, &eip);
- VMX_DBG_LOG(DBG_LEVEL_1,
- "Transfering control to vmxassist %%eip 0x%lx", eip);
- return 0; /* do not update eip! */
- }
- } else if (test_bit(VMX_CPU_STATE_ASSIST_ENABLED,
- &d->arch.arch_vmx.cpu_state)) {
- __vmread(GUEST_EIP, &eip);
- VMX_DBG_LOG(DBG_LEVEL_1,
- "Enabling CR0.PE at %%eip 0x%lx", eip);
- if (vmx_assist(d, VMX_ASSIST_RESTORE)) {
- clear_bit(VMX_CPU_STATE_ASSIST_ENABLED,
- &d->arch.arch_vmx.cpu_state);
- __vmread(GUEST_EIP, &eip);
- VMX_DBG_LOG(DBG_LEVEL_1,
- "Restoring to %%eip 0x%lx", eip);
- return 0; /* do not update eip! */
- }
- }
- }
- break;
+ __vmwrite(CR0_READ_SHADOW, value);
+ return vmx_set_cr0(value);
}
case 3:
{
@@ -790,8 +795,8 @@
domain_crash_synchronous(); /* need to take a clean path */
}
old_base_mfn = pagetable_val(d->arch.guest_table) >>
PAGE_SHIFT;
- d->arch.guest_table = mk_pagetable(mfn << PAGE_SHIFT);
- if ( old_base_mfn )
+ d->arch.guest_table = mk_pagetable(mfn << PAGE_SHIFT);
+ if (old_base_mfn)
put_page(pfn_to_page(old_base_mfn));
update_pagetables(d);
/*
@@ -892,6 +897,13 @@
__vmread(CR0_READ_SHADOW, &value);
value &= ~X86_CR0_TS; /* clear TS */
__vmwrite(CR0_READ_SHADOW, value);
+ break;
+ case TYPE_LMSW:
+ __vmwrite(CR0_READ_SHADOW, value);
+ value = (value & ~0xF) |
+ (((exit_qualification & LMSW_SOURCE_DATA) >> 16) & 0xF) |
+ 1 /* CR0.PE == 1 */;
+ return vmx_set_cr0(value);
break;
default:
__vmx_bug(regs);
===== xen/include/asm-x86/vmx.h 1.4 vs edited ====---
1.4/xen/include/asm-x86/vmx.h 2005-04-18 19:49:37 -04:00
+++ edited/xen/include/asm-x86/vmx.h 2005-04-24 13:55:18 -04:00
@@ -89,6 +89,7 @@
#define TYPE_MOV_TO_CR (0 << 4)
#define TYPE_MOV_FROM_CR (1 << 4)
#define TYPE_CLTS (2 << 4)
+#define TYPE_LMSW (3 << 4)
#define CONTROL_REG_ACCESS_REG 0x700 /* 10:8, general purpose
register */
#define REG_EAX (0 << 8)
#define REG_ECX (1 << 8)
@@ -98,6 +99,7 @@
#define REG_EBP (5 << 8)
#define REG_ESI (6 << 8)
#define REG_EDI (7 << 8)
+#define LMSW_SOURCE_DATA (0xFFFF << 16) /* 16:31 lmsw source */
/*
* Exit Qualifications for MOV for Debug Register Access
_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xensource.com
http://lists.xensource.com/xen-devel
Thanks for your changes. It is nice to see the ptrace support being
kept up to date. I obviously don't have access to the requisite
hardware.
Have you tried the modified xc_ptrace on a paravirtualized VM? I don't
think that the machine address read out of the page tables would index
correctly into the array of pages which, presumably, are in physical
order. I think the changes in map_guest_va should probably be
conditional on the guest being vmx.
-Kip
On 4/24/05, Leendert van Doorn <leendert@watson.ibm.com>
wrote:>
> This patch contains the following VMX patches:
>
> - ioemu: Update the address when doing a stosb/stosw/stosl. Without this
patch
> stosb writes req->count times at the same location req->addr. Not
exactly
> the intended semantics.
>
> - xc_ptrace.c: Use the page_array to refer to partition memory. This allows
> you to use gdbserver on a VMX partition (and presumably partitions that
use
> shadow page tables).
>
> - dom0_ops.c: Preserve ECF_VMX_GUEST flag for gdbserver. Without it you
> cannot (re)set the VMX domain state.
>
> - vmx.c: Added support for lmsw. Unmodified FreeBSD uses this when running
> inside a VMX partition.
>
> Signed-Off-By: Leendert van Doorn <leendert@watson.ibm.com>
>
> ===== tools/ioemu/iodev/cpu.cc 1.8 vs edited ====> ---
1.8/tools/ioemu/iodev/cpu.cc 2005-03-31 13:55:11 -05:00
> +++ edited/tools/ioemu/iodev/cpu.cc 2005-04-24 03:57:13 -04:00
> @@ -128,15 +128,13 @@
> if (!req->pdata_valid) {
> if(req->dir == IOREQ_READ){//read
> //BX_INFO(("mmio[value]: <READ>
addr:%llx, value:%llx, size: %llx, count: %llx\n", req->addr,
req->u.data, req->size, req->count));
> -
> for (i = 0; i < req->count; i++) {
> - BX_MEM_READ_PHYSICAL(req->addr,
req->size, &req->u.data);
> + BX_MEM_READ_PHYSICAL(req->addr +
(sign * i * req->size), req->size, &req->u.data);
> }
> } else if(req->dir == IOREQ_WRITE) {//write
> //BX_INFO(("mmio[value]: <WRITE>
addr:%llx, value:%llx, size: %llx, count: %llx\n", req->addr,
req->u.data, req->size, req->count));
> -
> for (i = 0; i < req->count; i++) {
> - BX_MEM_WRITE_PHYSICAL(req->addr,
req->size, &req->u.data);
> + BX_MEM_WRITE_PHYSICAL(req->addr
+ (sign * i * req->size), req->size, &req->u.data);
> }
> }
> } else {
> ===== tools/libxc/xc_ptrace.c 1.7 vs edited ====> ---
1.7/tools/libxc/xc_ptrace.c 2005-04-18 18:03:28 -04:00
> +++ edited/tools/libxc/xc_ptrace.c 2005-04-22 12:56:09 -04:00
> @@ -75,7 +75,7 @@
> int retval = xc_domain_getfullinfo(xc_handle, domid, cpu, NULL,
&ctxt[cpu]); \
> if (retval) \
> goto error_out; \
> - cr3[cpu] = ctxt[cpu].pt_base; \
> + cr3[cpu] = ctxt[cpu].pt_base; /* physical address */ \
> regs_valid[cpu] = 1; \
> } \
>
> @@ -128,11 +128,12 @@
>
> static int xc_handle;
> +static long nr_pages = 0;
> +unsigned long *page_array = NULL;
> static int regs_valid[MAX_VIRT_CPUS];
> static unsigned long cr3[MAX_VIRT_CPUS];
> static full_execution_context_t ctxt[MAX_VIRT_CPUS];
>
> -
> /* --------------------- */
>
> static void *
> @@ -140,6 +141,7 @@
> {
> unsigned long pde, page;
> unsigned long va = (unsigned long)guest_va;
> + long npgs = xc_get_tot_pages(xc_handle, domid);
>
> static unsigned long cr3_phys[MAX_VIRT_CPUS];
> static unsigned long *cr3_virt[MAX_VIRT_CPUS];
> @@ -150,6 +152,21 @@
>
> static int prev_perm[MAX_VIRT_CPUS];
>
> + if (nr_pages != npgs) {
> + if (nr_pages > 0)
> + free(page_array);
> + nr_pages = npgs;
> + if ((page_array = malloc(nr_pages * sizeof(unsigned long))) ==
NULL) {
> + printf("Could not allocate memory\n");
> + goto error_out;
> + }
> +
> + if (xc_get_pfn_list(xc_handle, domid, page_array, nr_pages) !=
nr_pages) {
> + printf("Could not get the page frame list\n");
> + goto error_out;
> + }
> + }
> +
> FETCH_REGS(cpu);
>
> if (cr3[cpu] != cr3_phys[cpu])
> @@ -162,8 +179,9 @@
> cr3_phys[cpu] >>
PAGE_SHIFT)) == NULL)
> goto error_out;
> }
> - if ((pde = cr3_virt[cpu][vtopdi(va)]) == 0)
> + if ((pde = cr3_virt[cpu][vtopdi(va)]) == 0) /* logical address */
> goto error_out;
> + pde = page_array[pde >> PAGE_SHIFT] << PAGE_SHIFT;
> if (pde != pde_phys[cpu])
> {
> pde_phys[cpu] = pde;
> @@ -174,8 +192,9 @@
> pde_phys[cpu] >>
PAGE_SHIFT)) == NULL)
> goto error_out;
> }
> - if ((page = pde_virt[cpu][vtopti(va)]) == 0)
> + if ((page = pde_virt[cpu][vtopti(va)]) == 0) /* logical address */
> goto error_out;
> + page = page_array[page >> PAGE_SHIFT] << PAGE_SHIFT;
> if (page != page_phys[cpu] || perm != prev_perm[cpu])
> {
> page_phys[cpu] = page;
> @@ -330,6 +349,7 @@
> perror("dom0 op failed");
> goto error_out;
> }
> + /* FALLTHROUGH */
> case PTRACE_CONT:
> case PTRACE_DETACH:
> if (request != PTRACE_SINGLESTEP) {
> ===== xen/arch/x86/dom0_ops.c 1.29 vs ? (writable without lock!) ====>
--- 1.29/xen/arch/x86/dom0_ops.c 2005-04-20 07:50:03 -04:00
> +++ ?/xen/arch/x86/dom0_ops.c 2005-04-25 00:50:11 -04:00
> @@ -404,6 +404,10 @@
> c->flags |= ECF_I387_VALID;
> if ( KERNEL_MODE(ed, &ed->arch.user_ctxt) )
> c->flags |= ECF_IN_KERNEL;
> +#ifdef CONFIG_VMX
> + if (VMX_DOMAIN(ed))
> + c->flags |= ECF_VMX_GUEST;
> +#endif
> memcpy(&c->fpu_ctxt,
> &ed->arch.i387,
> sizeof(ed->arch.i387));
> ===== xen/arch/x86/vmx.c 1.43 vs edited ====> ---
1.43/xen/arch/x86/vmx.c 2005-04-21 16:53:42 -04:00
> +++ edited/xen/arch/x86/vmx.c 2005-04-24 22:51:14 -04:00
> @@ -640,6 +640,84 @@
> return 0;
> }
>
> +static int vmx_set_cr0(unsigned long value)
> +{
> + struct exec_domain *d = current;
> + unsigned long old_base_mfn, mfn;
> + unsigned long eip;
> +
> + /*
> + * CR0: We don''t want to lose PE and PG.
> + */
> + __vmwrite(GUEST_CR0, (value | X86_CR0_PE | X86_CR0_PG));
> +
> + if (value & (X86_CR0_PE | X86_CR0_PG) &&
> + !test_bit(VMX_CPU_STATE_PG_ENABLED,
&d->arch.arch_vmx.cpu_state)) {
> + /*
> + * Enable paging
> + */
> + set_bit(VMX_CPU_STATE_PG_ENABLED,
&d->arch.arch_vmx.cpu_state);
> + /*
> + * The guest CR3 must be pointing to the guest physical.
> + */
> + if ( !VALID_MFN(mfn = phys_to_machine_mapping(
> + d->arch.arch_vmx.cpu_cr3 >>
PAGE_SHIFT)) ||
> + !get_page(pfn_to_page(mfn), d->domain) )
> + {
> + VMX_DBG_LOG(DBG_LEVEL_VMMU, "Invalid CR3 value =
%lx",
> + d->arch.arch_vmx.cpu_cr3);
> + domain_crash_synchronous(); /* need to take a clean path */
> + }
> + old_base_mfn = pagetable_val(d->arch.guest_table) >>
PAGE_SHIFT;
> + if (old_base_mfn)
> + put_page(pfn_to_page(old_base_mfn));
> +
> + /*
> + * Now arch.guest_table points to machine physical.
> + */
> + d->arch.guest_table = mk_pagetable(mfn << PAGE_SHIFT);
> + update_pagetables(d);
> +
> + VMX_DBG_LOG(DBG_LEVEL_VMMU, "New arch.guest_table =
%lx",
> + (unsigned long) (mfn << PAGE_SHIFT));
> +
> + __vmwrite(GUEST_CR3, pagetable_val(d->arch.shadow_table));
> + /*
> + * arch->shadow_table should hold the next CR3 for shadow
> + */
> + VMX_DBG_LOG(DBG_LEVEL_VMMU, "Update CR3 value = %lx, mfn =
%lx",
> + d->arch.arch_vmx.cpu_cr3, mfn);
> + } else {
> + if ((value & X86_CR0_PE) == 0) {
> + __vmread(GUEST_EIP, &eip);
> + VMX_DBG_LOG(DBG_LEVEL_1,
> + "Disabling CR0.PE at %%eip 0x%lx", eip);
> + if (vmx_assist(d, VMX_ASSIST_INVOKE)) {
> + set_bit(VMX_CPU_STATE_ASSIST_ENABLED,
> +
&d->arch.arch_vmx.cpu_state);
> + __vmread(GUEST_EIP, &eip);
> + VMX_DBG_LOG(DBG_LEVEL_1,
> + "Transfering control to vmxassist %%eip
0x%lx", eip);
> + return 0; /* do not update eip! */
> + }
> + } else if (test_bit(VMX_CPU_STATE_ASSIST_ENABLED,
> +
&d->arch.arch_vmx.cpu_state)) {
> + __vmread(GUEST_EIP, &eip);
> + VMX_DBG_LOG(DBG_LEVEL_1,
> + "Enabling CR0.PE at %%eip 0x%lx", eip);
> + if (vmx_assist(d, VMX_ASSIST_RESTORE)) {
> + clear_bit(VMX_CPU_STATE_ASSIST_ENABLED,
> +
&d->arch.arch_vmx.cpu_state);
> + __vmread(GUEST_EIP, &eip);
> + VMX_DBG_LOG(DBG_LEVEL_1,
> + "Restoring to %%eip 0x%lx", eip);
> + return 0; /* do not update eip! */
> + }
> + }
> + }
> + return 1;
> +}
> +
> #define CASE_GET_REG(REG, reg) \
> case REG_ ## REG: value = regs->reg; break
>
> @@ -650,7 +728,6 @@
> {
> unsigned long value;
> unsigned long old_cr;
> - unsigned long eip;
> struct exec_domain *d = current;
>
> switch (gp) {
> @@ -675,80 +752,8 @@
> switch(cr) {
> case 0:
> {
> - unsigned long old_base_mfn, mfn;
> -
> - /*
> - * CR0:
> - * We don''t want to lose PE and PG.
> - */
> - __vmwrite(GUEST_CR0, (value | X86_CR0_PE | X86_CR0_PG));
> - __vmwrite(CR0_READ_SHADOW, value);
> -
> - if (value & (X86_CR0_PE | X86_CR0_PG) &&
> - !test_bit(VMX_CPU_STATE_PG_ENABLED,
&d->arch.arch_vmx.cpu_state)) {
> - /*
> - * Enable paging
> - */
> - set_bit(VMX_CPU_STATE_PG_ENABLED,
&d->arch.arch_vmx.cpu_state);
> - /*
> - * The guest CR3 must be pointing to the guest physical.
> - */
> - if ( !VALID_MFN(mfn = phys_to_machine_mapping(
> - d->arch.arch_vmx.cpu_cr3 >>
PAGE_SHIFT)) ||
> - !get_page(pfn_to_page(mfn), d->domain) )
> - {
> - VMX_DBG_LOG(DBG_LEVEL_VMMU, "Invalid CR3 value =
%lx",
> - d->arch.arch_vmx.cpu_cr3);
> - domain_crash_synchronous(); /* need to take a clean path
*/
> - }
> - old_base_mfn = pagetable_val(d->arch.guest_table) >>
PAGE_SHIFT;
> - if ( old_base_mfn )
> - put_page(pfn_to_page(old_base_mfn));
> -
> - /*
> - * Now arch.guest_table points to machine physical.
> - */
> - d->arch.guest_table = mk_pagetable(mfn <<
PAGE_SHIFT);
> - update_pagetables(d);
> -
> - VMX_DBG_LOG(DBG_LEVEL_VMMU, "New arch.guest_table =
%lx",
> - (unsigned long) (mfn << PAGE_SHIFT));
> -
> - __vmwrite(GUEST_CR3, pagetable_val(d->arch.shadow_table));
> - /*
> - * arch->shadow_table should hold the next CR3 for shadow
> - */
> - VMX_DBG_LOG(DBG_LEVEL_VMMU, "Update CR3 value = %lx, mfn
= %lx",
> - d->arch.arch_vmx.cpu_cr3, mfn);
> - } else {
> - if ((value & X86_CR0_PE) == 0) {
> - __vmread(GUEST_EIP, &eip);
> - VMX_DBG_LOG(DBG_LEVEL_1,
> - "Disabling CR0.PE at %%eip 0x%lx", eip);
> - if (vmx_assist(d, VMX_ASSIST_INVOKE)) {
> - set_bit(VMX_CPU_STATE_ASSIST_ENABLED,
> -
&d->arch.arch_vmx.cpu_state);
> - __vmread(GUEST_EIP, &eip);
> - VMX_DBG_LOG(DBG_LEVEL_1,
> - "Transfering control to vmxassist %%eip
0x%lx", eip);
> - return 0; /* do not update eip! */
> - }
> - } else if (test_bit(VMX_CPU_STATE_ASSIST_ENABLED,
> -
&d->arch.arch_vmx.cpu_state)) {
> - __vmread(GUEST_EIP, &eip);
> - VMX_DBG_LOG(DBG_LEVEL_1,
> - "Enabling CR0.PE at %%eip 0x%lx", eip);
> - if (vmx_assist(d, VMX_ASSIST_RESTORE)) {
> - clear_bit(VMX_CPU_STATE_ASSIST_ENABLED,
> -
&d->arch.arch_vmx.cpu_state);
> - __vmread(GUEST_EIP, &eip);
> - VMX_DBG_LOG(DBG_LEVEL_1,
> - "Restoring to %%eip 0x%lx", eip);
> - return 0; /* do not update eip! */
> - }
> - }
> - }
> - break;
> + __vmwrite(CR0_READ_SHADOW, value);
> + return vmx_set_cr0(value);
> }
> case 3:
> {
> @@ -790,8 +795,8 @@
> domain_crash_synchronous(); /* need to take a clean path
*/
> }
> old_base_mfn = pagetable_val(d->arch.guest_table) >>
PAGE_SHIFT;
> - d->arch.guest_table = mk_pagetable(mfn <<
PAGE_SHIFT);
> - if ( old_base_mfn )
> + d->arch.guest_table = mk_pagetable(mfn <<
PAGE_SHIFT);
> + if (old_base_mfn)
> put_page(pfn_to_page(old_base_mfn));
> update_pagetables(d);
> /*
> @@ -892,6 +897,13 @@
> __vmread(CR0_READ_SHADOW, &value);
> value &= ~X86_CR0_TS; /* clear TS */
> __vmwrite(CR0_READ_SHADOW, value);
> + break;
> + case TYPE_LMSW:
> + __vmwrite(CR0_READ_SHADOW, value);
> + value = (value & ~0xF) |
> + (((exit_qualification & LMSW_SOURCE_DATA) >> 16)
& 0xF) |
> + 1 /* CR0.PE == 1 */;
> + return vmx_set_cr0(value);
> break;
> default:
> __vmx_bug(regs);
> ===== xen/include/asm-x86/vmx.h 1.4 vs edited ====> ---
1.4/xen/include/asm-x86/vmx.h 2005-04-18 19:49:37 -04:00
> +++ edited/xen/include/asm-x86/vmx.h 2005-04-24 13:55:18 -04:00
> @@ -89,6 +89,7 @@
> #define TYPE_MOV_TO_CR (0 << 4)
> #define TYPE_MOV_FROM_CR (1 << 4)
> #define TYPE_CLTS (2 << 4)
> +#define TYPE_LMSW (3 << 4)
> #define CONTROL_REG_ACCESS_REG 0x700 /* 10:8, general purpose
register */
> #define REG_EAX (0 << 8)
> #define REG_ECX (1 << 8)
> @@ -98,6 +99,7 @@
> #define REG_EBP (5 << 8)
> #define REG_ESI (6 << 8)
> #define REG_EDI (7 << 8)
> +#define LMSW_SOURCE_DATA (0xFFFF << 16) /*
16:31 lmsw source */
>
> /*
> * Exit Qualifications for MOV for Debug Register Access
>
> _______________________________________________
> Xen-devel mailing list
> Xen-devel@lists.xensource.com
> http://lists.xensource.com/xen-devel
>
_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xensource.com
http://lists.xensource.com/xen-devel
# Have you tried the modified xc_ptrace on a paravirtualized VM? I don't # think that the machine address read out of the page tables would index # correctly into the array of pages which, presumably, are in physical # order. I think the changes in map_guest_va should probably be # conditional on the guest being vmx. You are right, I didn't try the gdbserver patches on a paravirtualized VM, but I would expect xc_get_pfn_list to return an identity map in that case so that the dereferences still work. If it doesn't, then you need to test the flags for ECF_VMX_GUEST and do the dereferences conditionally. Leendert _______________________________________________ Xen-devel mailing list Xen-devel@lists.xensource.com http://lists.xensource.com/xen-devel
On 4/24/2005 9:47 PM, Leendert van Doorn wrote:> This patch contains the following VMX patches: > > - ioemu: Update the address when doing a stosb/stosw/stosl. Without this patch > stosb writes req->count times at the same location req->addr. Not exactly > the intended semantics.The string operations should go to the if (req->pdata_valid) clause. Is pdata_valid==0 for your stos* cases? -Arun _______________________________________________ Xen-devel mailing list Xen-devel@lists.xensource.com http://lists.xensource.com/xen-devel
> You are right, I didn't try the gdbserver patches on a paravirtualized VM, > but I would expect xc_get_pfn_list to return an identity map in that case > so that the dereferences still work.Ok - I haven't been keeping up to date on the helper functions.> If it doesn't, then you need to test the flags for ECF_VMX_GUEST and do > the dereferences conditionally.Thanks. _______________________________________________ Xen-devel mailing list Xen-devel@lists.xensource.com http://lists.xensource.com/xen-devel
#> - ioemu: Update the address when doing a stosb/stosw/stosl. Without this #> patch stosb writes req->count times at the same location req->addr. Not #> exactly the intended semantics. # # The string operations should go to the if (req->pdata_valid) clause. # Is pdata_valid==0 for your stos* cases? Yes. stos* only uses a value parameter (al, ax,or eax), so pdata_valid == 0. Addr, size and count are still valid Leendert _______________________________________________ Xen-devel mailing list Xen-devel@lists.xensource.com http://lists.xensource.com/xen-devel
Leendert van Doorn wrote:> #> - ioemu: Update the address when doing a stosb/stosw/stosl. Without this > #> patch stosb writes req->count times at the same location req->addr. Not > #> exactly the intended semantics. > # > # The string operations should go to the if (req->pdata_valid) clause. > # Is pdata_valid==0 for your stos* cases? > > Yes. stos* only uses a value parameter (al, ax,or eax), so pdata_valid == 0. > Addr, size and count are still valid >ok - makes sense. Though pdata_valid is generally 1 for string operations (this is always true for the port i/o case), this case is an exception. -Arun _______________________________________________ Xen-devel mailing list Xen-devel@lists.xensource.com http://lists.xensource.com/xen-devel
Core was generated by `gdbserver-xen curly:2345 --attach 5'.
Program terminated with signal 11, Segmentation fault.
warning: current_sos: Can't read pathname for load map: Input/output
error
Reading symbols from /usr/lib/libxc.so.3.0...done.
Loaded symbols for /usr/lib/libxc.so.3.0
Reading symbols from /usr/lib/libxutil.so.3.0...done.
Loaded symbols for /usr/lib/libxutil.so.3.0
Reading symbols from /lib/i686/libc.so.6...done.
Loaded symbols for /lib/i686/libc.so.6
Reading symbols from /usr/lib/libz.so.1...done.
Loaded symbols for /usr/lib/libz.so.1
Reading symbols from /lib/ld-linux.so.2...done.
Loaded symbols for /lib/ld-linux.so.2
#0 0xb7fc74e5 in map_domain_va (domid=0x5, cpu=0x0,
guest_va=0xc018e8bc, perm=0x1) at xc_ptrace.c:184
184 pde = page_array[pde >> PAGE_SHIFT] << PAGE_SHIFT;
-Kip
On 4/25/05, Leendert van Doorn <leendert@watson.ibm.com>
wrote:>
> # Have you tried the modified xc_ptrace on a paravirtualized VM? I
don't
> # think that the machine address read out of the page tables would index
> # correctly into the array of pages which, presumably, are in physical
> # order. I think the changes in map_guest_va should probably be
> # conditional on the guest being vmx.
>
> You are right, I didn't try the gdbserver patches on a
paravirtualized VM,
> but I would expect xc_get_pfn_list to return an identity map in that case
> so that the dereferences still work.
>
> If it doesn't, then you need to test the flags for ECF_VMX_GUEST
and do
> the dereferences conditionally.
>
> Leendert
>
>
_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xensource.com
http://lists.xensource.com/xen-devel