Simon Kagstrom
2006-May-11 15:02 UTC
[Xen-devel] [PATCH] reenabling ptrace for paravirtualized guests
Debugging with the gdbserver has been broken for paravirtualized
guests for a while, since parts of xc_ptrace.c assume HVM guests. The
following patch fixes this, along with single-stepping in attached
domains. The patch tries to separate out HVM-specific address
handling. Changes:

- Functions va_to_ma32 and va_to_ma64 have been added. These translate
  virtual to machine addresses through a given page directory/table,
  and optionally through page_array for HVM guests. These have
  replaced the page_array references.

- paging_enabled now contains a check for HVM guests, and always
  returns 1 for PV guests.

- Reversed logic in PTRACE_SINGLESTEP to allow stepping in attached
  domains and disallow stepping in corefiles.

NOTE: I only have access to "regular" 32-bit hardware, so I've only
been able to test map_domain_va_32 for PV guests. It would be nice if
other people could test it on HVM, PAE and 64-bit guests.

// Simon

Signed-off-by: Simon Kagstrom <simon.kagstrom@bth.se>

===File /tmp/xc_ptrace.patch===============================
diff -r fc9ec6fd3400 tools/libxc/xc_ptrace.c
--- a/tools/libxc/xc_ptrace.c  Mon May 08 14:56:18 2006 +0100
+++ b/tools/libxc/xc_ptrace.c  Thu May 11 16:49:59 2006 +0200
@@ -100,8 +100,15 @@ static inline int
 static inline int
 paging_enabled(vcpu_guest_context_t *v)
 {
-    unsigned long cr0 = v->ctrlreg[0];
-    return (cr0 & X86_CR0_PE) && (cr0 & X86_CR0_PG);
+    /* This check can be removed when Xen places the correct values in
+     * cr0 for paravirtualized guests.
+     */
+    if ( (v->flags & VGCF_HVM_GUEST) == 1 ) {
+        unsigned long cr0 = v->ctrlreg[0];
+
+        return (cr0 & X86_CR0_PE) && (cr0 & X86_CR0_PG);
+    }
+    return 1;
 }
 
 /*
@@ -157,6 +164,43 @@ static long nr_pages
 static long nr_pages = 0;
 static unsigned long *page_array = NULL;
 
+
+static unsigned long
+va_to_ma32(int cpu,
+           uint32_t *table,
+           unsigned long idx)
+{
+    unsigned long out;
+
+    /* Paravirtualized domains store machine addresses in tables while
+     * HVM domains keep pseudo-physical addresses. HVM domains
+     * therefore need one extra translation.
+     */
+    if ( (out = table[idx]) == 0 )
+        return 0;
+    if ( (ctxt[cpu].flags & VGCF_HVM_GUEST) && paging_enabled(&ctxt[cpu]) )
+        out = page_array[idx] << PAGE_SHIFT;
+    return out;
+}
+
+static unsigned long
+va_to_ma64(int cpu,
+           uint64_t *table,
+           unsigned long idx)
+{
+    unsigned long out;
+
+    /* Paravirtualized domains store machine addresses in tables while
+     * HVM domains keep pseudo-physical addresses. HVM domains
+     * therefore need one extra translation.
+     */
+    if ( (out = table[idx]) == 0 )
+        return 0;
+    if ( (ctxt[cpu].flags & VGCF_HVM_GUEST) && paging_enabled(&ctxt[cpu]) )
+        out = page_array[idx] << PAGE_SHIFT;
+    return out;
+}
+
 static void *
 map_domain_va_32(
     int xc_handle,
@@ -188,10 +232,8 @@ map_domain_va_32(
         if ( cr3_virt[cpu] == NULL )
             return NULL;
     }
-    if ( (pde = cr3_virt[cpu][vtopdi(va)]) == 0 )
-        return NULL;
-    if ( (ctxt[cpu].flags & VGCF_HVM_GUEST) && paging_enabled(&ctxt[cpu]) )
-        pde = page_array[pde >> PAGE_SHIFT] << PAGE_SHIFT;
+    if ( (pde = va_to_ma32(cpu, cr3_virt[cpu], vtopdi(va))) == 0 )
+        return NULL;
     if ( pde != pde_phys[cpu] )
     {
         pde_phys[cpu] = pde;
@@ -203,10 +245,9 @@ map_domain_va_32(
         if ( pde_virt[cpu] == NULL )
             return NULL;
     }
-    if ( (page = pde_virt[cpu][vtopti(va)]) == 0 )
-        return NULL;
-    if (ctxt[cpu].flags & VGCF_HVM_GUEST)
-        page = page_array[page >> PAGE_SHIFT] << PAGE_SHIFT;
+    if ( (page = va_to_ma32(cpu, pde_virt[cpu], vtopti(va))) == 0 )
+        return NULL;
+
     if ( (page != page_phys[cpu]) || (perm != prev_perm[cpu]) )
     {
         page_phys[cpu] = page;
@@ -243,25 +284,25 @@ map_domain_va_pae(
     if ( l3 == NULL )
         return NULL;
 
-    l2p = l3[l3_table_offset_pae(va)] >> PAGE_SHIFT;
-    l2p = page_array[l2p];
-    l2 = xc_map_foreign_range(xc_handle, current_domid, PAGE_SIZE, PROT_READ, l2p);
+    if ( ( l2p = va_to_ma64(cpu, l3, l3_table_offset_pae(va)) ) == 0 )
+        return NULL;
+    l2 = xc_map_foreign_range(xc_handle, current_domid, PAGE_SIZE, PROT_READ, l2p >> PAGE_SHIFT);
     munmap(l3, PAGE_SIZE);
     if ( l2 == NULL )
         return NULL;
 
-    l1p = l2[l2_table_offset_pae(va)] >> PAGE_SHIFT;
-    l1p = page_array[l1p];
-    l1 = xc_map_foreign_range(xc_handle, current_domid, PAGE_SIZE, perm, l1p);
+    if ( ( l1p = va_to_ma64(cpu, l2, l2_table_offset_pae(va)) ) == 0 )
+        return NULL;
+    l1 = xc_map_foreign_range(xc_handle, current_domid, PAGE_SIZE, perm, l1p >> PAGE_SHIFT);
     munmap(l2, PAGE_SIZE);
     if ( l1 == NULL )
         return NULL;
 
-    p = l1[l1_table_offset_pae(va)] >> PAGE_SHIFT;
-    p = page_array[p];
+    if ( ( p = va_to_ma64(cpu, l1, l1_table_offset_pae(va)) ) == 0 )
+        return NULL;
     if ( v != NULL )
         munmap(v, PAGE_SIZE);
-    v = xc_map_foreign_range(xc_handle, current_domid, PAGE_SIZE, perm, p);
+    v = xc_map_foreign_range(xc_handle, current_domid, PAGE_SIZE, perm, p >> PAGE_SHIFT);
     munmap(l1, PAGE_SIZE);
     if ( v == NULL )
         return NULL;
@@ -289,38 +330,41 @@ map_domain_va_64(
     if ( l4 == NULL )
         return NULL;
 
-    l3p = l4[l4_table_offset(va)] >> PAGE_SHIFT;
-    l3p = page_array[l3p];
-    l3 = xc_map_foreign_range(xc_handle, current_domid, PAGE_SIZE, PROT_READ, l3p);
+    if ( ( l3p = va_to_ma64(cpu, l4, l4_table_offset(va)) ) == 0 )
+        return NULL;
+    l3 = xc_map_foreign_range(xc_handle, current_domid, PAGE_SIZE, PROT_READ, l3p >> PAGE_SHIFT);
     munmap(l4, PAGE_SIZE);
     if ( l3 == NULL )
         return NULL;
 
-    l2p = l3[l3_table_offset(va)] >> PAGE_SHIFT;
-    l2p = page_array[l2p];
-    l2 = xc_map_foreign_range(xc_handle, current_domid, PAGE_SIZE, PROT_READ, l2p);
+    if ( ( l2p = va_to_ma64(cpu, l3, l3_table_offset(va)) ) == 0 )
+        return NULL;
+    l2 = xc_map_foreign_range(xc_handle, current_domid, PAGE_SIZE, PROT_READ, l2p >> PAGE_SHIFT);
     munmap(l3, PAGE_SIZE);
     if ( l2 == NULL )
         return NULL;
 
     l1 = NULL;
-    l1e = l2[l2_table_offset(va)];
+    if ( ( l1e = va_to_ma64(cpu, l2, l2_table_offset(va)) ) == 0 )
+        return NULL;
     l1p = l1e >> PAGE_SHIFT;
     if (l1e & 0x80) { /* 2M pages */
         p = (l1p + l1_table_offset(va));
+        if ( (ctxt[cpu].flags & VGCF_HVM_GUEST))
+            p = page_array[p] << PAGE_SHIFT;
     } else { /* 4K pages */
-        l1p = page_array[l1p];
-        l1 = xc_map_foreign_range(xc_handle, current_domid, PAGE_SIZE, perm, l1p);
+        if ( ( l1p = va_to_ma64(cpu, l1e, l1_table_offset(va)) ) == 0 )
+            return NULL;
+        l1 = xc_map_foreign_range(xc_handle, current_domid, PAGE_SIZE, perm, l1p >> PAGE_SHIFT);
         munmap(l2, PAGE_SIZE);
         if ( l1 == NULL )
             return NULL;
-        p = l1[l1_table_offset(va)] >> PAGE_SHIFT;
-    }
-    p = page_array[p];
+        p = va_to_ma64(cpu, l1, l1_table_offset(va));
+    }
     if ( v != NULL )
         munmap(v, PAGE_SIZE);
-    v = xc_map_foreign_range(xc_handle, current_domid, PAGE_SIZE, perm, p);
+    v = xc_map_foreign_range(xc_handle, current_domid, PAGE_SIZE, perm, p >> PAGE_SHIFT);
     if (l1)
         munmap(l1, PAGE_SIZE);
     if ( v == NULL )
@@ -381,7 +425,7 @@ map_domain_va(
 
         if ( v != NULL )
             munmap(v, PAGE_SIZE);
-        page = page_array[va >> PAGE_SHIFT] << PAGE_SHIFT;
+        page = va_to_ma32(cpu, (uint32_t*)page_array, va);
         v = xc_map_foreign_range( xc_handle, current_domid, PAGE_SIZE,
                                   perm, page >> PAGE_SHIFT);
@@ -526,7 +570,7 @@ xc_ptrace(
         break;
 
     case PTRACE_SINGLESTEP:
-        if (!current_isfile)
+        if (current_isfile)
            goto out_unspported; /* XXX not yet supported */
        /* XXX we can still have problems if the user switches threads
         * during single-stepping - but that just seems retarded
===========================================================
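The distinction these helpers encode is simple: a PV guest's page
tables already hold machine frame numbers, while an HVM guest's tables
hold pseudo-physical frames that must additionally go through the
domain's physical-to-machine map. A minimal standalone sketch of the
two paths, with a toy p2m_table and entry_to_machine() as hypothetical
stand-ins for xc_ptrace.c's page_array and the patch's helpers (the
p2m indexing follows the reworked to_ma() that appears later in this
thread, rather than the page_array[idx] form above):

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12

/* Toy pseudo-physical-to-machine map, standing in for xc_ptrace.c's
 * page_array: entry N holds the machine frame backing the guest's
 * pseudo-physical frame N. */
static unsigned long p2m_table[] = { 0x1a3, 0x077, 0x2b1, 0x004 };

/* Turn a page-table entry into the machine address of the page it
 * references. A PV guest's tables already hold machine frames, so the
 * entry is used as-is; an HVM guest's tables hold pseudo-physical
 * frames, which need one extra lookup. */
static unsigned long
entry_to_machine(unsigned long pt_entry, int is_hvm)
{
    unsigned long frame = pt_entry >> PAGE_SHIFT;  /* low bits are flags */

    if (is_hvm)
        frame = p2m_table[frame];  /* pseudo-physical -> machine frame */

    return frame << PAGE_SHIFT;
}

int main(void)
{
    unsigned long pte = (2UL << PAGE_SHIFT) | 0x3;  /* frame 2, present+RW */

    printf("PV:  %#lx\n", entry_to_machine(pte, 0));  /* 0x2000 */
    printf("HVM: %#lx\n", entry_to_machine(pte, 1));  /* 0x2b1000 */
    return 0;
}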
Hollis Blanchard
2006-May-11 21:29 UTC
Re: [Xen-devel] [PATCH] reenabling ptrace for paravirtualized guests
On Thu, 2006-05-11 at 17:02 +0200, Simon Kagstrom wrote:

> [...]
>
>  static inline int
>  paging_enabled(vcpu_guest_context_t *v)
>  {
> -    unsigned long cr0 = v->ctrlreg[0];
> -    return (cr0 & X86_CR0_PE) && (cr0 & X86_CR0_PG);
> +    /* This check can be removed when Xen places the correct values in
> +     * cr0 for paravirtualized guests.
> +     */
> +    if ( (v->flags & VGCF_HVM_GUEST) == 1 ) {
> +        unsigned long cr0 = v->ctrlreg[0];
> +
> +        return (cr0 & X86_CR0_PE) && (cr0 & X86_CR0_PG);
> +    }
> +    return 1;
>  }

Instead of this, please look at the patch Ryan sent (Re: [Xen-devel]
[PATCH] paging_enabled and non-HVM guests). You'll need to clean up
the whitespace at least, though.

> +static unsigned long
> +va_to_ma32(int cpu,
> +           uint32_t *table,
> +           unsigned long idx)
> +{
> +    unsigned long out;
> +
> +    /* Paravirtualized domains store machine addresses in tables while
> +     * HVM domains keep pseudo-physical addresses. HVM domains
> +     * therefore need one extra translation.
> +     */
> +    if ( (out = table[idx]) == 0 )
> +        return 0;
> +    if ( (ctxt[cpu].flags & VGCF_HVM_GUEST) && paging_enabled(&ctxt[cpu]) )
> +        out = page_array[idx] << PAGE_SHIFT;
> +    return out;
> +}
> +
> +static unsigned long
> +va_to_ma64(int cpu,
> +           uint64_t *table,
> +           unsigned long idx)
> +{
> +    unsigned long out;
> +
> +    /* Paravirtualized domains store machine addresses in tables while
> +     * HVM domains keep pseudo-physical addresses. HVM domains
> +     * therefore need one extra translation.
> +     */
> +    if ( (out = table[idx]) == 0 )

Isn't 0 as a physical or machine address valid?

> +        return 0;

Couldn't 0 be a valid machine address? Can you leave these checks and
the table indexing to the callers, where they are now?

> +    if ( (ctxt[cpu].flags & VGCF_HVM_GUEST) && paging_enabled(&ctxt[cpu]) )
> +        out = page_array[idx] << PAGE_SHIFT;
> +    return out;
> +}

I'd suggest renaming "out" to be more descriptive, like "maddr".

Since you don't have an HVM system to test on, you should probably CC
the people who do who've worked on xc_ptrace.c. hg log suggests
Kamble, Nitin A <nitin.a.kamble@intel.com>.

-- 
Hollis Blanchard
IBM Linux Technology Center
Simon Kagstrom
2006-May-12 07:07 UTC
Re: [Xen-devel] [PATCH] reenabling ptrace for paravirtualized guests
Thanks for your comments!

At Thu, 11 May 2006 16:29:57 -0500, Hollis Blanchard wrote:

> >  static inline int
> >  paging_enabled(vcpu_guest_context_t *v)
> >  {
> > -    unsigned long cr0 = v->ctrlreg[0];
> > -    return (cr0 & X86_CR0_PE) && (cr0 & X86_CR0_PG);
> > +    /* This check can be removed when Xen places the correct values in
> > +     * cr0 for paravirtualized guests.
> > +     */
> > +    if ( (v->flags & VGCF_HVM_GUEST) == 1 ) {
> > +        unsigned long cr0 = v->ctrlreg[0];
> > +
> > +        return (cr0 & X86_CR0_PE) && (cr0 & X86_CR0_PG);
> > +    }
> > +    return 1;
> >  }
>
> Instead of this, please look at the patch Ryan sent (Re: [Xen-devel]
> [PATCH] paging_enabled and non-HVM guests). You'll need to clean up
> the whitespace at least, though.

I'll comment on Ryan's patch below, to make sure I understand what you
mean.

> diff -r c4eead8a925b tools/libxc/xc_ptrace.c
> --- a/tools/libxc/xc_ptrace.c  Sun Apr 16 14:41:31 2006
> +++ b/tools/libxc/xc_ptrace.c  Thu Apr 20 22:44:35 2006
> @@ -281,8 +281,10 @@
>      uint64_t *l4, *l3, *l2, *l1;
>      static void *v;
>
> +#if 0
>      if ((ctxt[cpu].ctrlreg[4] & 0x20) == 0 ) /* legacy ia32 mode */
>          return map_domain_va_32(xc_handle, cpu, guest_va, perm);
> +#endif

Is this check valid for non-HVM guests, i.e., is it possible to have
32-bit PV guests on a 64-bit hypervisor?

> [3 setup_sane_cr0.patch <text/plain; us-ascii (7bit)>]
> diff -r c4eead8a925b tools/libxc/xc_linux_build.c
> --- a/tools/libxc/xc_linux_build.c  Sun Apr 16 14:41:31 2006
> +++ b/tools/libxc/xc_linux_build.c  Thu Apr 20 22:45:21 2006
> @@ -45,6 +45,11 @@
>  #ifdef __ia64__
>  #define probe_aout9(image,image_size,load_funcs) 1
>  #endif
> +
> +/* from xc_ptrace.h */
> +#define X86_CR0_PE 0x00000001 /* Enable Protected Mode (RW) */
> +#define X86_CR0_PG 0x80000000 /* Paging (RW) */
> +
>
>  struct initrd_info {
>      enum { INITRD_none, INITRD_file, INITRD_mem } type;
> @@ -1174,6 +1179,8 @@
>      ctxt->failsafe_callback_eip = 0;
>      ctxt->syscall_callback_eip = 0;
>  #endif
> +    /* set sane cr0 bits, protected and paging enabled */
> +    ctxt->ctrlreg[0] = X86_CR0_PE|X86_CR0_PG;
>  #endif /* x86 */
> [...]
> --- a/linux-2.6-xen-sparse/drivers/xen/core/smpboot.c  Sun Apr 16 14:41:31 2006
> +++ b/linux-2.6-xen-sparse/drivers/xen/core/smpboot.c  Thu Apr 20 22:45:36 2006
> @@ -216,6 +216,8 @@
>
>      ctxt.gs_base_kernel = (unsigned long)(cpu_pda(vcpu));
>  #endif
> +    /* set sane cr0 bits, protected and paging enabled */
> +    ctxt.ctrlreg[0] = 0x80000001;
>
>      BUG_ON(HYPERVISOR_vcpu_op(VCPUOP_initialise, vcpu, &ctxt));
>  }

I'll add this to the patch.

> > @@ -157,6 +164,43 @@ static long nr_pages
> >  static long nr_pages = 0;
> >  static unsigned long *page_array = NULL;
> >
> > +static unsigned long
> > +va_to_ma64(int cpu,
> > +           uint64_t *table,
> > +           unsigned long idx)
> > +{
> > +    unsigned long out;
> > +
> > +    /* Paravirtualized domains store machine addresses in tables while
> > +     * HVM domains keep pseudo-physical addresses. HVM domains
> > +     * therefore need one extra translation.
> > +     */
> > +    if ( (out = table[idx]) == 0 )
>
> Isn't 0 as a physical or machine address valid?
>
> > +        return 0;
>
> Couldn't 0 be a valid machine address?
>
> Can you leave these checks and the table indexing to the callers,
> where they are now?

True, I'll just return the value and revert the table indexing. I'm a
bit unsure about the checks for 0, though: map_domain_va_32 does this,
but map_domain_va_pae and map_domain_va_64 contained no checks. Maybe
the proper fix is just to remove the checks in map_domain_va_32, as 0
can be a valid physical/machine address?

> Since you don't have an HVM system to test on, you should probably CC
> the people who do who've worked on xc_ptrace.c. hg log suggests
> Kamble, Nitin A <nitin.a.kamble@intel.com>.

I'll send a reworked patch. Nitin: could you test the next patch?

// Simon
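The ambiguity being discussed here, using 0 both for "no translation"
and as a possible real machine address, is the usual sentinel-value
problem. One conventional alternative, sketched below with
hypothetical names (this is not code from the patch), is to report
success separately from the address, so machine address 0 stays
representable; the present bit (bit 0 of an x86 page-table entry) is
what actually says whether a mapping exists.

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical alternative interface: instead of returning 0 for "no
 * translation" (which collides with machine address 0), report success
 * separately and pass the address out through a pointer. */
static bool
translate_entry(unsigned long pt_entry, unsigned long *maddr)
{
    if (!(pt_entry & 1))            /* x86 PTE present bit is clear */
        return false;

    *maddr = pt_entry & ~0xfffUL;   /* strip the flag bits, keep the frame */
    return true;
}

int main(void)
{
    unsigned long m;

    /* A present entry that maps machine page 0 is legal, and is now
     * distinguishable from "no mapping at all". */
    if (translate_entry(0x1, &m))
        printf("mapped at %#lx\n", m);   /* prints 0 */
    if (!translate_entry(0x0, &m))
        printf("not present\n");
    return 0;
}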
Keir Fraser
2006-May-12 07:19 UTC
Re: [Xen-devel] [PATCH] reenabling ptrace for paravirtualized guests
On 12 May 2006, at 08:07, Simon Kagstrom wrote:

>> ctxt.gs_base_kernel = (unsigned long)(cpu_pda(vcpu));
>> #endif
>> +    /* set sane cr0 bits, protected and paging enabled */
>> +    ctxt.ctrlreg[0] = 0x80000001;
>>
>>      BUG_ON(HYPERVISOR_vcpu_op(VCPUOP_initialise, vcpu, &ctxt));
>> }
>
> I'll add this to the patch.

This can be done entirely in Xen, without guest modification and
avoiding magic numbers. Add the following to arch_set_info_guest,
immediately after the memcpy to guest_context:

    /* Only CR0.TS is modifiable by guest or admin. */
    v->arch.guest_context.ctrl_reg[0] &= X86_CR0_TS;
    v->arch.guest_context.ctrl_reg[0] |= read_cr0() & ~X86_CR0_TS;

Whether we should fill in from real CR0 or simply set a static
selection of flags is open to debate. I don't really think it matters
either way.

 -- Keir
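The effect of those two lines is easy to check with concrete numbers:
CR0.TS ("task switched") is bit 3, i.e. 0x8, and PE/PG are bits 0 and
31 as in Ryan's quoted defines. A small user-space illustration of the
merge, with a made-up host CR0 value standing in for read_cr0():

#include <stdio.h>

#define X86_CR0_PE 0x00000001UL   /* protected mode enable, bit 0  */
#define X86_CR0_TS 0x00000008UL   /* task switched,         bit 3  */
#define X86_CR0_PG 0x80000000UL   /* paging enable,         bit 31 */

int main(void)
{
    /* What the PV tools hand in today is essentially garbage (zero);
     * TS is set here to show that the guest's TS bit survives. */
    unsigned long guest_cr0 = X86_CR0_TS;

    /* Stand-in for read_cr0(): any real host CR0 has PE and PG set. */
    unsigned long host_cr0 = X86_CR0_PE | X86_CR0_PG;

    /* The proposed merge: keep only the guest's TS bit, take every
     * other bit from the host. */
    unsigned long cr0 = (guest_cr0 & X86_CR0_TS) | (host_cr0 & ~X86_CR0_TS);

    /* Prints 0x80000009: PE and PG from the host plus the guest's TS,
     * so paging_enabled() now sees sane values for PV guests too. */
    printf("merged cr0 = %#lx\n", cr0);
    return 0;
}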
Simon Kagstrom
2006-May-12 09:21 UTC
Re: [Xen-devel] [PATCH] reenabling ptrace for paravirtualized guests
Here is a new version of the patch which reenables ptrace for
paravirtualized guests and enables single-stepping in GDB. Changes:

- Added a to_ma function which translates a physical address to a
  machine address for HVM guests and just returns the address for PV
  guests.

- Use to_ma() in map_domain_va_32, map_domain_va_pae and
  map_domain_va_64 to abstract away differences between HVM and PV
  guests.

- Reenable single-stepping for attached domains.

- (From Keir, Ryan) Export CR0 to PV guests as well.

I've removed the address checks against 0 since the last version, as
physical/machine address 0 should be valid. Invalid translations
should be caught in xc_map_foreign_range.

As before, I have not been able to test the patch on HVM, PAE or
64-bit domains and would appreciate it if someone could give the patch
a try.

Signed-off-by: Simon Kagstrom <simon.kagstrom@bth.se>

===File /tmp/xc_ptrace.patch===============================
diff -r 5df90f95916e tools/libxc/xc_ptrace.c
--- a/tools/libxc/xc_ptrace.c  Fri May 12 10:36:26 2006 +0200
+++ b/tools/libxc/xc_ptrace.c  Fri May 12 11:13:14 2006 +0200
@@ -157,6 +157,27 @@ static long nr_pages
 static long nr_pages = 0;
 static unsigned long *page_array = NULL;
 
+
+/*
+ * Translates physical addresses to machine addresses for HVM
+ * guests. For paravirtual domains the function will just return the
+ * given address.
+ *
+ * This function should be used when reading page directories/page
+ * tables.
+ *
+ */
+static unsigned long
+to_ma(int cpu,
+      unsigned long in_addr)
+{
+    unsigned long maddr = in_addr;
+
+    if ( (ctxt[cpu].flags & VGCF_HVM_GUEST) && paging_enabled(&ctxt[cpu]) )
+        maddr = page_array[maddr >> PAGE_SHIFT] << PAGE_SHIFT;
+    return maddr;
+}
+
 static void *
 map_domain_va_32(
     int xc_handle,
@@ -188,10 +209,7 @@ map_domain_va_32(
         if ( cr3_virt[cpu] == NULL )
             return NULL;
     }
-    if ( (pde = cr3_virt[cpu][vtopdi(va)]) == 0 )
-        return NULL;
-    if ( (ctxt[cpu].flags & VGCF_HVM_GUEST) && paging_enabled(&ctxt[cpu]) )
-        pde = page_array[pde >> PAGE_SHIFT] << PAGE_SHIFT;
+    pde = to_ma(cpu, cr3_virt[cpu][vtopdi(va)]);
     if ( pde != pde_phys[cpu] )
     {
         pde_phys[cpu] = pde;
@@ -203,10 +221,8 @@ map_domain_va_32(
         if ( pde_virt[cpu] == NULL )
             return NULL;
     }
-    if ( (page = pde_virt[cpu][vtopti(va)]) == 0 )
-        return NULL;
-    if (ctxt[cpu].flags & VGCF_HVM_GUEST)
-        page = page_array[page >> PAGE_SHIFT] << PAGE_SHIFT;
+    page = to_ma(cpu, pde_virt[cpu][vtopti(va)]);
+
     if ( (page != page_phys[cpu]) || (perm != prev_perm[cpu]) )
     {
         page_phys[cpu] = page;
@@ -243,25 +259,22 @@ map_domain_va_pae(
     if ( l3 == NULL )
         return NULL;
 
-    l2p = l3[l3_table_offset_pae(va)] >> PAGE_SHIFT;
-    l2p = page_array[l2p];
-    l2 = xc_map_foreign_range(xc_handle, current_domid, PAGE_SIZE, PROT_READ, l2p);
+    l2p = to_ma(cpu, l3[l3_table_offset_pae(va)]);
+    l2 = xc_map_foreign_range(xc_handle, current_domid, PAGE_SIZE, PROT_READ, l2p >> PAGE_SHIFT);
     munmap(l3, PAGE_SIZE);
     if ( l2 == NULL )
         return NULL;
 
-    l1p = l2[l2_table_offset_pae(va)] >> PAGE_SHIFT;
-    l1p = page_array[l1p];
-    l1 = xc_map_foreign_range(xc_handle, current_domid, PAGE_SIZE, perm, l1p);
+    l1p = to_ma(cpu, l2[l2_table_offset_pae(va)]);
+    l1 = xc_map_foreign_range(xc_handle, current_domid, PAGE_SIZE, perm, l1p >> PAGE_SHIFT);
     munmap(l2, PAGE_SIZE);
     if ( l1 == NULL )
         return NULL;
 
-    p = l1[l1_table_offset_pae(va)] >> PAGE_SHIFT;
-    p = page_array[p];
+    p = to_ma(cpu, l1[l1_table_offset_pae(va)]);
     if ( v != NULL )
         munmap(v, PAGE_SIZE);
-    v = xc_map_foreign_range(xc_handle, current_domid, PAGE_SIZE, perm, p);
+    v = xc_map_foreign_range(xc_handle, current_domid, PAGE_SIZE, perm, p >> PAGE_SHIFT);
     munmap(l1, PAGE_SIZE);
     if ( v == NULL )
         return NULL;
@@ -289,38 +302,35 @@ map_domain_va_64(
     if ( l4 == NULL )
         return NULL;
 
-    l3p = l4[l4_table_offset(va)] >> PAGE_SHIFT;
-    l3p = page_array[l3p];
-    l3 = xc_map_foreign_range(xc_handle, current_domid, PAGE_SIZE, PROT_READ, l3p);
+    l3p = to_ma(cpu, l4[l4_table_offset(va)]);
+    l3 = xc_map_foreign_range(xc_handle, current_domid, PAGE_SIZE, PROT_READ, l3p >> PAGE_SHIFT);
     munmap(l4, PAGE_SIZE);
     if ( l3 == NULL )
         return NULL;
 
-    l2p = l3[l3_table_offset(va)] >> PAGE_SHIFT;
-    l2p = page_array[l2p];
-    l2 = xc_map_foreign_range(xc_handle, current_domid, PAGE_SIZE, PROT_READ, l2p);
+    l2p = to_ma(cpu, l3[l3_table_offset(va)]);
+    l2 = xc_map_foreign_range(xc_handle, current_domid, PAGE_SIZE, PROT_READ, l2p >> PAGE_SHIFT);
     munmap(l3, PAGE_SIZE);
     if ( l2 == NULL )
         return NULL;
 
     l1 = NULL;
-    l1e = l2[l2_table_offset(va)];
+    l1e = to_ma(cpu, l2[l2_table_offset(va)]);
    l1p = l1e >> PAGE_SHIFT;
     if (l1e & 0x80) { /* 2M pages */
-        p = (l1p + l1_table_offset(va));
+        p = to_ma(cpu, (l1p + l1_table_offset(va)) << PAGE_SHIFT);
     } else { /* 4K pages */
-        l1p = page_array[l1p];
-        l1 = xc_map_foreign_range(xc_handle, current_domid, PAGE_SIZE, perm, l1p);
+        l1p = to_ma(cpu, l1e[l1_table_offset(va)]);
+        l1 = xc_map_foreign_range(xc_handle, current_domid, PAGE_SIZE, perm, l1p >> PAGE_SHIFT);
         munmap(l2, PAGE_SIZE);
         if ( l1 == NULL )
             return NULL;
-        p = l1[l1_table_offset(va)] >> PAGE_SHIFT;
-    }
-    p = page_array[p];
+        p = to_ma(cpu, l1[l1_table_offset(va)]);
+    }
     if ( v != NULL )
         munmap(v, PAGE_SIZE);
-    v = xc_map_foreign_range(xc_handle, current_domid, PAGE_SIZE, perm, p);
+    v = xc_map_foreign_range(xc_handle, current_domid, PAGE_SIZE, perm, p >> PAGE_SHIFT);
     if (l1)
         munmap(l1, PAGE_SIZE);
     if ( v == NULL )
@@ -381,7 +391,7 @@ map_domain_va(
 
         if ( v != NULL )
             munmap(v, PAGE_SIZE);
-        page = page_array[va >> PAGE_SHIFT] << PAGE_SHIFT;
+        page = to_ma(cpu, page_array[va >> PAGE_SHIFT]);
         v = xc_map_foreign_range( xc_handle, current_domid, PAGE_SIZE,
                                   perm, page >> PAGE_SHIFT);
@@ -526,7 +536,7 @@ xc_ptrace(
         break;
 
     case PTRACE_SINGLESTEP:
-        if (!current_isfile)
+        if (current_isfile)
             goto out_unspported; /* XXX not yet supported */
         /* XXX we can still have problems if the user switches threads
          * during single-stepping - but that just seems retarded
diff -r 5df90f95916e xen/arch/x86/domain.c
--- a/xen/arch/x86/domain.c  Fri May 12 10:36:26 2006 +0200
+++ b/xen/arch/x86/domain.c  Fri May 12 11:13:14 2006 +0200
@@ -379,6 +379,9 @@ int arch_set_info_guest(
         v->arch.flags |= TF_kernel_mode;
 
     memcpy(&v->arch.guest_context, c, sizeof(*c));
+    /* Only CR0.TS is modifiable by guest or admin. */
+    v->arch.guest_context.ctrlreg[0] &= X86_CR0_TS;
+    v->arch.guest_context.ctrlreg[0] |= read_cr0() & ~X86_CR0_TS;
     init_int80_direct_trap(v);
 
     if ( !(c->flags & VGCF_HVM_GUEST) )
===========================================================
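One subtle spot carried over from the original code is the l1e & 0x80
test in map_domain_va_64: bit 7 of a directory entry is the page-size
(PSE) bit, so when it is set the entry maps a 2MB page directly
instead of pointing at a 4KB page table. A small standalone
illustration of the two cases (plain C with made-up values; the
to_ma() translation step is omitted for clarity):

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT     12
#define PDE_PSE        0x80   /* bit 7: entry maps a large (2MB) page */
#define PTRS_PER_TABLE 512    /* entries per table with 64-bit paging */

/* Index of va within a level-1 table, as l1_table_offset() computes it. */
static unsigned long l1_index(unsigned long va)
{
    return (va >> PAGE_SHIFT) & (PTRS_PER_TABLE - 1);
}

int main(void)
{
    unsigned long va  = 0x40123456UL;             /* made-up address   */
    uint64_t      l1e = 0x40000000UL | PDE_PSE;   /* made-up 2M mapping */

    if (l1e & PDE_PSE) {
        /* 2M page: the entry's frame starts a run of 512 contiguous
         * 4K frames; add the level-1 index to pick the right one. */
        unsigned long frame = (unsigned long)(l1e >> PAGE_SHIFT) + l1_index(va);
        printf("2M path: frame %#lx\n", frame);   /* 0x40000 + 0x123 */
    } else {
        /* 4K page: the entry points at a page table, so one more
         * level must be mapped and indexed to find the final frame. */
        printf("4K path: walk one more level\n");
    }
    return 0;
}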
Keir Fraser
2006-May-14 19:11 UTC
Re: [Xen-devel] [PATCH] reenabling ptrace for paravirtualized guests
On 12 May 2006, at 10:21, Simon Kagstrom wrote:

> As before, I have not been able to test the patch on HVM, PAE or
> 64-bit domains and would appreciate it if someone could give the
> patch a try.

At least x86/64 is screwed, because the patch breaks the build. I
checked in a temporary extra patch to fix the build, but it won't
actually make xc_ptrace work on x86/64.

 -- Keir