Tim Deegan
2012-Sep-13 14:01 UTC
[PATCH] x86/mm: remove the linear mapping of the p2m tables
# HG changeset patch
# User Tim Deegan <tim@xen.org>
# Date 1347544824 -3600
# Node ID a770d1c8448d73ccf2ec36a5322532c2e3c14641
# Parent 5691e4cc17da7fe8664a67f1d07c3755c0ca34ed
x86/mm: remove the linear mapping of the p2m tables.

Mapping the p2m into the monitor tables was an important optimization
on 32-bit builds, where it avoided mapping and unmapping p2m pages
during a walk.  On 64-bit it makes no difference -- see
http://old-list-archives.xen.org/archives/html/xen-devel/2010-04/msg00981.html

Get rid of it, and use the explicit walk for all lookups.

Signed-off-by: Tim Deegan <tim@xen.org>

diff -r 5691e4cc17da -r a770d1c8448d xen/arch/x86/mm/hap/hap.c
--- a/xen/arch/x86/mm/hap/hap.c Thu Sep 13 10:23:17 2012 +0200
+++ b/xen/arch/x86/mm/hap/hap.c Thu Sep 13 15:00:24 2012 +0100
@@ -405,11 +405,6 @@ static void hap_install_xen_entries_in_l
     l4e[l4_table_offset(LINEAR_PT_VIRT_START)] =
         l4e_from_pfn(mfn_x(l4mfn), __PAGE_HYPERVISOR);
 
-    /* Install the domain-specific P2M table */
-    l4e[l4_table_offset(RO_MPT_VIRT_START)] =
-        l4e_from_pfn(mfn_x(pagetable_get_mfn(p2m_get_pagetable(p2m_get_hostp2m(d)))),
-                     __PAGE_HYPERVISOR);
-
     hap_unmap_domain_page(l4e);
 }
 
diff -r 5691e4cc17da -r a770d1c8448d xen/arch/x86/mm/p2m-pt.c
--- a/xen/arch/x86/mm/p2m-pt.c  Thu Sep 13 10:23:17 2012 +0200
+++ b/xen/arch/x86/mm/p2m-pt.c  Thu Sep 13 15:00:24 2012 +0100
@@ -460,186 +460,6 @@ out:
     return rv;
 }
 
-
-/* Read the current domain's p2m table (through the linear mapping). */
-static mfn_t p2m_gfn_to_mfn_current(struct p2m_domain *p2m,
-                                    unsigned long gfn, p2m_type_t *t,
-                                    p2m_access_t *a, p2m_query_t q,
-                                    unsigned int *page_order)
-{
-    mfn_t mfn = _mfn(INVALID_MFN);
-    p2m_type_t p2mt = p2m_mmio_dm;
-    paddr_t addr = ((paddr_t)gfn) << PAGE_SHIFT;
-    /* XXX This is for compatibility with the old model, where anything not
-     * XXX marked as RAM was considered to be emulated MMIO space.
-     * XXX Once we start explicitly registering MMIO regions in the p2m
-     * XXX we will return p2m_invalid for unmapped gfns */
-
-    l1_pgentry_t l1e = l1e_empty(), *p2m_entry;
-    l2_pgentry_t l2e = l2e_empty();
-    l3_pgentry_t l3e = l3e_empty();
-    int ret;
-
-    ASSERT(gfn < (RO_MPT_VIRT_END - RO_MPT_VIRT_START)
-           / sizeof(l1_pgentry_t));
-
-    /*
-     * Read & process L3
-     */
-    p2m_entry = (l1_pgentry_t *)
-        &__linear_l2_table[l2_linear_offset(RO_MPT_VIRT_START)
-                           + l3_linear_offset(addr)];
-pod_retry_l3:
-    ret = __copy_from_user(&l3e, p2m_entry, sizeof(l3e));
-
-    if ( ret != 0 || !(l3e_get_flags(l3e) & _PAGE_PRESENT) )
-    {
-        if ( (l3e_get_flags(l3e) & _PAGE_PSE) &&
-             (p2m_flags_to_type(l3e_get_flags(l3e)) == p2m_populate_on_demand) )
-        {
-            /* The read has succeeded, so we know that mapping exists */
-            if ( q & P2M_ALLOC )
-            {
-                if ( !p2m_pod_demand_populate(p2m, gfn, PAGE_ORDER_1G, q) )
-                    goto pod_retry_l3;
-                p2mt = p2m_invalid;
-                gdprintk(XENLOG_ERR, "%s: Allocate 1GB failed!\n", __func__);
-                goto out;
-            }
-            else
-            {
-                p2mt = p2m_populate_on_demand;
-                goto out;
-            }
-        }
-        goto pod_retry_l2;
-    }
-
-    if ( l3e_get_flags(l3e) & _PAGE_PSE )
-    {
-        p2mt = p2m_flags_to_type(l3e_get_flags(l3e));
-        ASSERT(l3e_get_pfn(l3e) != INVALID_MFN || !p2m_is_ram(p2mt));
-        if (p2m_is_valid(p2mt) )
-            mfn = _mfn(l3e_get_pfn(l3e) +
-                       l2_table_offset(addr) * L1_PAGETABLE_ENTRIES +
-                       l1_table_offset(addr));
-        else
-            p2mt = p2m_mmio_dm;
-
-        if ( page_order )
-            *page_order = PAGE_ORDER_1G;
-        goto out;
-    }
-
-    /*
-     * Read & process L2
-     */
-    p2m_entry = &__linear_l1_table[l1_linear_offset(RO_MPT_VIRT_START)
-                                   + l2_linear_offset(addr)];
-
-pod_retry_l2:
-    ret = __copy_from_user(&l2e,
-                           p2m_entry,
-                           sizeof(l2e));
-    if ( ret != 0
-         || !(l2e_get_flags(l2e) & _PAGE_PRESENT) )
-    {
-        if( (l2e_get_flags(l2e) & _PAGE_PSE)
-            && ( p2m_flags_to_type(l2e_get_flags(l2e))
-                 == p2m_populate_on_demand ) )
-        {
-            /* The read has succeeded, so we know that the mapping
-             * exits at this point. */
-            if ( q & P2M_ALLOC )
-            {
-                if ( !p2m_pod_demand_populate(p2m, gfn,
-                                              PAGE_ORDER_2M, q) )
-                    goto pod_retry_l2;
-
-                /* Allocate failed. */
-                p2mt = p2m_invalid;
-                printk("%s: Allocate failed!\n", __func__);
-                goto out;
-            }
-            else
-            {
-                p2mt = p2m_populate_on_demand;
-                goto out;
-            }
-        }
-
-        goto pod_retry_l1;
-    }
-
-    if (l2e_get_flags(l2e) & _PAGE_PSE)
-    {
-        p2mt = p2m_flags_to_type(l2e_get_flags(l2e));
-        ASSERT(l2e_get_pfn(l2e) != INVALID_MFN || !p2m_is_ram(p2mt));
-
-        if ( p2m_is_valid(p2mt) )
-            mfn = _mfn(l2e_get_pfn(l2e) + l1_table_offset(addr));
-        else
-            p2mt = p2m_mmio_dm;
-
-        if ( page_order )
-            *page_order = PAGE_ORDER_2M;
-        goto out;
-    }
-
-    /*
-     * Read and process L1
-     */
-
-    /* Need to __copy_from_user because the p2m is sparse and this
-     * part might not exist */
-pod_retry_l1:
-    p2m_entry = &phys_to_machine_mapping[gfn];
-
-    ret = __copy_from_user(&l1e,
-                           p2m_entry,
-                           sizeof(l1e));
-
-    if ( ret == 0 ) {
-        unsigned long l1e_mfn = l1e_get_pfn(l1e);
-        p2mt = p2m_flags_to_type(l1e_get_flags(l1e));
-        ASSERT( mfn_valid(_mfn(l1e_mfn)) || !p2m_is_ram(p2mt) ||
-                p2m_is_paging(p2mt) );
-
-        if ( p2mt == p2m_populate_on_demand )
-        {
-            /* The read has succeeded, so we know that the mapping
-             * exits at this point. */
-            if ( q & P2M_ALLOC )
-            {
-                if ( !p2m_pod_demand_populate(p2m, gfn,
-                                              PAGE_ORDER_4K, q) )
-                    goto pod_retry_l1;
-
-                /* Allocate failed. */
-                p2mt = p2m_invalid;
-                goto out;
-            }
-            else
-            {
-                p2mt = p2m_populate_on_demand;
-                goto out;
-            }
-        }
-
-        if ( p2m_is_valid(p2mt) || p2m_is_grant(p2mt) )
-            mfn = _mfn(l1e_mfn);
-        else
-            /* XXX see above */
-            p2mt = p2m_mmio_dm;
-    }
-
-    if ( page_order )
-        *page_order = PAGE_ORDER_4K;
-out:
-    *t = p2mt;
-    return mfn;
-}
-
 static mfn_t
 p2m_gfn_to_mfn(struct p2m_domain *p2m, unsigned long gfn,
                p2m_type_t *t, p2m_access_t *a, p2m_query_t q,
@@ -666,10 +486,6 @@ p2m_gfn_to_mfn(struct p2m_domain *p2m, u
         /* This pfn is higher than the highest the p2m map currently holds */
         return _mfn(INVALID_MFN);
 
-    /* Use the fast path with the linear mapping if we can */
-    if ( p2m == p2m_get_hostp2m(current->domain) )
-        return p2m_gfn_to_mfn_current(p2m, gfn, t, a, q, page_order);
-
     mfn = pagetable_get_mfn(p2m_get_pagetable(p2m));
 
     {
@@ -904,17 +720,10 @@ long p2m_pt_audit_p2m(struct p2m_domain
 {
     unsigned long entry_count = 0, pmbad = 0;
     unsigned long mfn, gfn, m2pfn;
-    int test_linear;
-    struct domain *d = p2m->domain;
 
     ASSERT(p2m_locked_by_me(p2m));
     ASSERT(pod_locked_by_me(p2m));
 
-    test_linear = ( (d == current->domain)
-                    && !pagetable_is_null(current->arch.monitor_table) );
-    if ( test_linear )
-        flush_tlb_local();
-
     /* Audit part one: walk the domain's p2m table, checking the entries. */
     if ( pagetable_get_pfn(p2m_get_pagetable(p2m)) != 0 )
     {
diff -r 5691e4cc17da -r a770d1c8448d xen/arch/x86/mm/shadow/multi.c
--- a/xen/arch/x86/mm/shadow/multi.c    Thu Sep 13 10:23:17 2012 +0200
+++ b/xen/arch/x86/mm/shadow/multi.c    Thu Sep 13 15:00:24 2012 +0100
@@ -1472,14 +1472,6 @@ void sh_install_xen_entries_in_l4(struct
             shadow_l4e_from_mfn(gl4mfn, __PAGE_HYPERVISOR);
     }
 
-    if ( shadow_mode_translate(v->domain) )
-    {
-        /* install domain-specific P2M table */
-        sl4e[shadow_l4_table_offset(RO_MPT_VIRT_START)] =
-            shadow_l4e_from_mfn(pagetable_get_mfn(p2m_get_pagetable(p2m_get_hostp2m(d))),
-                                __PAGE_HYPERVISOR);
-    }
-
     sh_unmap_domain_page(sl4e);
 }
 
 #endif
diff -r 5691e4cc17da -r a770d1c8448d xen/include/asm-x86/p2m.h
--- a/xen/include/asm-x86/p2m.h Thu Sep 13 10:23:17 2012 +0200
+++ b/xen/include/asm-x86/p2m.h Thu Sep 13 15:00:24 2012 +0100
@@ -35,22 +35,6 @@ extern bool_t opt_hap_1gb, opt_hap_2mb;
 
 /*
- * The phys_to_machine_mapping maps guest physical frame numbers
- * to machine frame numbers.  It only exists for paging_mode_translate
- * guests. It is organised in page-table format, which:
- *
- * (1) allows us to use it directly as the second pagetable in hardware-
- *     assisted paging and (hopefully) iommu support; and
- * (2) lets us map it directly into the guest vcpus' virtual address space
- *     as a linear pagetable, so we can read and write it easily.
- *
- * For (2) we steal the address space that would have normally been used
- * by the read-only MPT map in a non-translated guest.  (For
- * paging_mode_external() guests this mapping is in the monitor table.)
- */
-#define phys_to_machine_mapping ((l1_pgentry_t *)RO_MPT_VIRT_START)
-
-/*
  * The upper levels of the p2m pagetable always contain full rights; all
  * variation in the access control bits is made in the level-1 PTEs.
  *
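The trade-off described in the commit message is easy to see in a small, self-contained sketch. This is a toy model, not Xen code: the two-level table, the flat array and the names lookup_linear()/lookup_walk() are invented for illustration, and the real complications (l3/l2/l1 entry types, superpages, populate-on-demand, map_domain_page()) are left out. The linear-map style reads the p2m as one virtually-contiguous array in a single indexed access; the explicit walk follows the tree one level at a time, which is what every lookup does once the patch is applied.

/* Toy model of the two p2m lookup styles discussed in this patch.
 * Names and structures are illustrative only -- they are NOT Xen's. */
#include <stdint.h>
#include <stdio.h>

#define ENTRIES_PER_TABLE 512
#define INVALID_MFN       (~0UL)

/* One level of a two-level "p2m" tree: a leaf table holds mfns,
 * the root holds pointers to leaf tables (NULL == not populated). */
struct leaf { unsigned long mfn[ENTRIES_PER_TABLE]; };
struct root { struct leaf *leaf[ENTRIES_PER_TABLE]; };

/* Linear-map style: the whole table is reachable as one flat array,
 * so a lookup is a single indexed read (this is the idea behind the
 * removed p2m_gfn_to_mfn_current(), via RO_MPT_VIRT_START). */
static unsigned long lookup_linear(const unsigned long *flat_p2m,
                                   unsigned long nr_gfns, unsigned long gfn)
{
    return (gfn < nr_gfns) ? flat_p2m[gfn] : INVALID_MFN;
}

/* Explicit-walk style: descend the tree level by level.  In Xen each
 * level's page would be mapped and unmapped around the read; here the
 * "mapping" is just a pointer chase. */
static unsigned long lookup_walk(const struct root *r, unsigned long gfn)
{
    unsigned long hi = gfn / ENTRIES_PER_TABLE;
    unsigned long lo = gfn % ENTRIES_PER_TABLE;
    const struct leaf *l;

    if ( hi >= ENTRIES_PER_TABLE || (l = r->leaf[hi]) == NULL )
        return INVALID_MFN;          /* hole in the sparse p2m */
    return l->mfn[lo];
}

int main(void)
{
    struct root r = { { NULL } };
    struct leaf l;
    unsigned long flat[2 * ENTRIES_PER_TABLE];
    unsigned long gfn = ENTRIES_PER_TABLE + 7;   /* arbitrary test gfn */

    for ( unsigned long i = 0; i < ENTRIES_PER_TABLE; i++ )
        l.mfn[i] = 0x100000 + i;
    for ( unsigned long i = 0; i < 2 * ENTRIES_PER_TABLE; i++ )
        flat[i] = (i >= ENTRIES_PER_TABLE) ? 0x100000 + (i - ENTRIES_PER_TABLE)
                                           : INVALID_MFN;
    r.leaf[1] = &l;

    printf("linear: %#lx, walk: %#lx\n",
           lookup_linear(flat, 2 * ENTRIES_PER_TABLE, gfn),
           lookup_walk(&r, gfn));
    return 0;
}

On 64-bit the explicit walk stays cheap because every p2m page can be reached through the always-present 1:1 mapping, with no PTE writes or TLB flushes on the way, which is consistent with the "makes no difference" measurement linked from the commit message.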
Jan Beulich
2012-Sep-13 14:23 UTC
Re: [PATCH] x86/mm: remove the linear mapping of the p2m tables
>>> On 13.09.12 at 16:01, Tim Deegan <tim@xen.org> wrote:
> # HG changeset patch
> # User Tim Deegan <tim@xen.org>
> # Date 1347544824 -3600
> # Node ID a770d1c8448d73ccf2ec36a5322532c2e3c14641
> # Parent 5691e4cc17da7fe8664a67f1d07c3755c0ca34ed
> x86/mm: remove the linear mapping of the p2m tables.
>
> Mapping the p2m into the monitor tables was an important optimization
> on 32-bit builds, where it avoided mapping and unmapping p2m pages
> during a walk.  On 64-bit it makes no difference -- see
> http://old-list-archives.xen.org/archives/html/xen-devel/2010-04/msg00981.html

Is that also going to remain true when we won't be able to 1:1-
map all of the memory anymore once we break the current 5Tb
barrier? If not, it would probably be worthwhile keeping that
code.

Jan
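Roughly where the 5Tb figure comes from: one L4 slot of the 64-bit address space spans 2^39 bytes (512GiB), and the 1:1 direct map is confined to a fixed handful of such slots. The arithmetic below is only illustrative -- the slot count of 10 is an assumption of this sketch, and the authoritative layout is the memory-map comment in xen/include/asm-x86/config.h.

/* Back-of-the-envelope arithmetic for the direct-map limit mentioned
 * above.  The slot count is an assumption; see xen/include/asm-x86/config.h
 * for the real layout. */
#include <stdio.h>

int main(void)
{
    unsigned long long l4_slot_bytes = 1ULL << 39;  /* 512 GiB per L4 entry */
    unsigned int directmap_slots = 10;              /* assumed slot count   */
    unsigned long long bytes = directmap_slots * l4_slot_bytes;

    printf("direct map covers %llu GiB (= %llu TiB)\n",
           bytes >> 30, bytes >> 40);
    return 0;
}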
Tim Deegan
2012-Sep-13 14:42 UTC
Re: [PATCH] x86/mm: remove the linear mapping of the p2m tables
At 15:23 +0100 on 13 Sep (1347549790), Jan Beulich wrote:
> >>> On 13.09.12 at 16:01, Tim Deegan <tim@xen.org> wrote:
> > # HG changeset patch
> > # User Tim Deegan <tim@xen.org>
> > # Date 1347544824 -3600
> > # Node ID a770d1c8448d73ccf2ec36a5322532c2e3c14641
> > # Parent 5691e4cc17da7fe8664a67f1d07c3755c0ca34ed
> > x86/mm: remove the linear mapping of the p2m tables.
> >
> > Mapping the p2m into the monitor tables was an important optimization
> > on 32-bit builds, where it avoided mapping and unmapping p2m pages
> > during a walk.  On 64-bit it makes no difference -- see
> > http://old-list-archives.xen.org/archives/html/xen-devel/2010-04/msg00981.html
>
> Is that also going to remain true when we won't be able to 1:1-
> map all of the memory anymore once we break the current 5Tb
> barrier? If not, it would probably be worthwhile keeping that
> code.

Ah, 5TB is a smaller limit than I thought we had.  Yes, better leave it
alone, so.  Though TBH finding some way to use a bit more virtual
address space for Xen seems like a good idea anyway, since this won't be
the only place we'll want to avoid TLB flushes.

Tim.
Keir Fraser
2012-Sep-13 14:58 UTC
Re: [PATCH] x86/mm: remove the linear mapping of the p2m tables
On 13/09/2012 15:42, "Tim Deegan" <tim@xen.org> wrote:

>> Is that also going to remain true when we won't be able to 1:1-
>> map all of the memory anymore once we break the current 5Tb
>> barrier? If not, it would probably be worthwhile keeping that
>> code.
>
> Ah, 5TB is a smaller limit than I thought we had.  Yes, better leave it
> alone, so.  Though TBH finding some way to use a bit more virtual
> address space for Xen seems like a good idea anyway, since this won't be
> the only place we'll want to avoid TLB flushes.

For HVM or PVH guests, where this HAP code would be used, clearly Xen can
use all the virtual address space it wants. It will almost certainly make
sense for Xen to have a 1:1 physical mapping of all memory when running such
a guest, and only do mapcache type tricks when running legacy PV guests.

 -- Keir
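A minimal sketch of the split described above, under stated assumptions: the constants mirror the 64-bit layout of the time but are illustrative, and the function names are invented (Xen's real primitives for the two cases are mfn_to_virt()/__va() and the map_domain_page() mapcache). Machine addresses below the direct-map boundary translate with pure arithmetic, touching no PTEs and causing no TLB flushes; anything above the boundary would need a transient mapping.

/* Toy model of "direct map when it fits, mapcache otherwise".
 * Names and constants are illustrative, not Xen's actual interface. */
#include <stdint.h>
#include <stdbool.h>
#include <stdio.h>

#define DIRECTMAP_VIRT_START 0xffff830000000000ULL  /* assumed base address */
#define DIRECTMAP_SIZE       (5ULL << 40)           /* assumed 5 TiB of 1:1 map */

/* Can this machine address be reached through the 1:1 region? */
static bool use_directmap(uint64_t maddr)
{
    return maddr < DIRECTMAP_SIZE;
}

/* Direct-map translation: pure arithmetic, no PTE writes, no TLB flush. */
static uint64_t directmap_va(uint64_t maddr)
{
    return DIRECTMAP_VIRT_START + maddr;
}

int main(void)
{
    uint64_t below = 2ULL << 40;   /* 2 TiB: reachable through the 1:1 map */
    uint64_t above = 6ULL << 40;   /* 6 TiB: would need a mapcache entry   */

    printf("%#llx -> %s, va = %#llx\n", (unsigned long long)below,
           use_directmap(below) ? "direct map" : "mapcache",
           (unsigned long long)directmap_va(below));
    printf("%#llx -> %s\n", (unsigned long long)above,
           use_directmap(above) ? "direct map" : "mapcache");
    return 0;
}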
Jan Beulich
2012-Sep-13 15:08 UTC
Re: [PATCH] x86/mm: remove the linear mapping of the p2m tables
>>> On 13.09.12 at 16:58, Keir Fraser <keir.xen@gmail.com> wrote:
> On 13/09/2012 15:42, "Tim Deegan" <tim@xen.org> wrote:
>
>>> Is that also going to remain true when we won't be able to 1:1-
>>> map all of the memory anymore once we break the current 5Tb
>>> barrier? If not, it would probably be worthwhile keeping that
>>> code.
>>
>> Ah, 5TB is a smaller limit than I thought we had.  Yes, better leave it
>> alone, so.  Though TBH finding some way to use a bit more virtual
>> address space for Xen seems like a good idea anyway, since this won't be
>> the only place we'll want to avoid TLB flushes.
>
> For HVM or PVH guests, where this HAP code would be used, clearly Xen can
> use all the virtual address space it wants. It will almost certainly make
> sense for Xen to have a 1:1 physical mapping of all memory when running such
> a guest, and only do mapcache type tricks when running legacy PV guests.

Yes, that's the mode I indeed wanted to get to. Just that it's
not really clear to me (without having started at least the
work of bumping the boundary) how intrusive those changes
are going to be.

Jan
Tim Deegan
2012-Sep-13 15:11 UTC
Re: [PATCH] x86/mm: remove the linear mapping of the p2m tables
At 15:58 +0100 on 13 Sep (1347551914), Keir Fraser wrote:
> On 13/09/2012 15:42, "Tim Deegan" <tim@xen.org> wrote:
>
> >> Is that also going to remain true when we won't be able to 1:1-
> >> map all of the memory anymore once we break the current 5Tb
> >> barrier? If not, it would probably be worthwhile keeping that
> >> code.
> >
> > Ah, 5TB is a smaller limit than I thought we had.  Yes, better leave it
> > alone, so.  Though TBH finding some way to use a bit more virtual
> > address space for Xen seems like a good idea anyway, since this won't be
> > the only place we'll want to avoid TLB flushes.
>
> For HVM or PVH guests, where this HAP code would be used, clearly Xen can
> use all the virtual address space it wants. It will almost certainly make
> sense for Xen to have a 1:1 physical mapping of all memory when running such
> a guest, and only do mapcache type tricks when running legacy PV guests.

This is also used for shadowed guests, including autotranslated PV
guests, if anyone cares about them any more.  I got the impression that
they're superseded by the pvh stuff; is that right?

If that's the case, then let's commit to having a bigger 1-1 map on HVM
guests when the time comes to extend past 5TB, and remove this linear
map after all.

Tim.
Keir Fraser
2012-Sep-13 15:17 UTC
Re: [PATCH] x86/mm: remove the linear mapping of the p2m tables
On 13/09/2012 16:08, "Jan Beulich" <JBeulich@suse.com> wrote:

>>>> On 13.09.12 at 16:58, Keir Fraser <keir.xen@gmail.com> wrote:
>> On 13/09/2012 15:42, "Tim Deegan" <tim@xen.org> wrote:
>>
>>>> Is that also going to remain true when we won't be able to 1:1-
>>>> map all of the memory anymore once we break the current 5Tb
>>>> barrier? If not, it would probably be worthwhile keeping that
>>>> code.
>>>
>>> Ah, 5TB is a smaller limit than I thought we had.  Yes, better leave it
>>> alone, so.  Though TBH finding some way to use a bit more virtual
>>> address space for Xen seems like a good idea anyway, since this won't be
>>> the only place we'll want to avoid TLB flushes.
>>
>> For HVM or PVH guests, where this HAP code would be used, clearly Xen can
>> use all the virtual address space it wants. It will almost certainly make
>> sense for Xen to have a 1:1 physical mapping of all memory when running such
>> a guest, and only do mapcache type tricks when running legacy PV guests.
>
> Yes, that's the mode I indeed wanted to get to. Just that it's
> not really clear to me (without having started at least the
> work of bumping the boundary) how intrusive those changes
> are going to be.

Well, this is true. But almost regardless of the complexity, this is how
we're going to want to do it, and it does mean we won't need the linear map.
:)

 -- Keir

> Jan
>
Keir Fraser
2012-Sep-13 15:18 UTC
Re: [PATCH] x86/mm: remove the linear mapping of the p2m tables
On 13/09/2012 16:11, "Tim Deegan" <tim@xen.org> wrote:

> At 15:58 +0100 on 13 Sep (1347551914), Keir Fraser wrote:
>> On 13/09/2012 15:42, "Tim Deegan" <tim@xen.org> wrote:
>>
>>>> Is that also going to remain true when we won't be able to 1:1-
>>>> map all of the memory anymore once we break the current 5Tb
>>>> barrier? If not, it would probably be worthwhile keeping that
>>>> code.
>>>
>>> Ah, 5TB is a smaller limit than I thought we had.  Yes, better leave it
>>> alone, so.  Though TBH finding some way to use a bit more virtual
>>> address space for Xen seems like a good idea anyway, since this won't be
>>> the only place we'll want to avoid TLB flushes.
>>
>> For HVM or PVH guests, where this HAP code would be used, clearly Xen can
>> use all the virtual address space it wants. It will almost certainly make
>> sense for Xen to have a 1:1 physical mapping of all memory when running such
>> a guest, and only do mapcache type tricks when running legacy PV guests.
>
> This is also used for shadowed guests, including autotranslated PV
> guests, if anyone cares about them any more.  I got the impression that
> they're superseded by the pvh stuff; is that right?

Auto-translated PV seems to be one of those unsupported things that never
quite dies. With PVH just round the corner, let's definitively call it dead.
:)

> If that's the case, then let's commit to having a bigger 1-1 map on HVM
> guests when the time comes to extend past 5TB, and remove this linear
> map after all.

I agree.

 -- Keir

> Tim.
Jan Beulich
2012-Sep-13 15:36 UTC
Re: [PATCH] x86/mm: remove the linear mapping of the p2m tables
>>> On 13.09.12 at 17:17, Keir Fraser <keir.xen@gmail.com> wrote:
> On 13/09/2012 16:08, "Jan Beulich" <JBeulich@suse.com> wrote:
>
>>>>> On 13.09.12 at 16:58, Keir Fraser <keir.xen@gmail.com> wrote:
>>> On 13/09/2012 15:42, "Tim Deegan" <tim@xen.org> wrote:
>>>
>>>>> Is that also going to remain true when we won't be able to 1:1-
>>>>> map all of the memory anymore once we break the current 5Tb
>>>>> barrier? If not, it would probably be worthwhile keeping that
>>>>> code.
>>>>
>>>> Ah, 5TB is a smaller limit than I thought we had.  Yes, better leave it
>>>> alone, so.  Though TBH finding some way to use a bit more virtual
>>>> address space for Xen seems like a good idea anyway, since this won't be
>>>> the only place we'll want to avoid TLB flushes.
>>>
>>> For HVM or PVH guests, where this HAP code would be used, clearly Xen can
>>> use all the virtual address space it wants. It will almost certainly make
>>> sense for Xen to have a 1:1 physical mapping of all memory when running such
>>> a guest, and only do mapcache type tricks when running legacy PV guests.
>>
>> Yes, that's the mode I indeed wanted to get to. Just that it's
>> not really clear to me (without having started at least the
>> work of bumping the boundary) how intrusive those changes
>> are going to be.
>
> Well, this is true. But almost regardless of the complexity, this is how
> we're going to want to do it, and it does mean we won't need the linear map.
> :)

Agreed.

Jan
Konrad Rzeszutek Wilk
2012-Sep-13 20:43 UTC
Re: [PATCH] x86/mm: remove the linear mapping of the p2m tables
On Thu, Sep 13, 2012 at 11:18 AM, Keir Fraser <keir.xen@gmail.com> wrote:
> On 13/09/2012 16:11, "Tim Deegan" <tim@xen.org> wrote:
>
>> At 15:58 +0100 on 13 Sep (1347551914), Keir Fraser wrote:
>>> On 13/09/2012 15:42, "Tim Deegan" <tim@xen.org> wrote:
>>>
>>>>> Is that also going to remain true when we won't be able to 1:1-
>>>>> map all of the memory anymore once we break the current 5Tb
>>>>> barrier? If not, it would probably be worthwhile keeping that
>>>>> code.
>>>>
>>>> Ah, 5TB is a smaller limit than I thought we had.  Yes, better leave it
>>>> alone, so.  Though TBH finding some way to use a bit more virtual
>>>> address space for Xen seems like a good idea anyway, since this won't be
>>>> the only place we'll want to avoid TLB flushes.
>>>
>>> For HVM or PVH guests, where this HAP code would be used, clearly Xen can
>>> use all the virtual address space it wants. It will almost certainly make
>>> sense for Xen to have a 1:1 physical mapping of all memory when running such
>>> a guest, and only do mapcache type tricks when running legacy PV guests.
>>
>> This is also used for shadowed guests, including autotranslated PV
>> guests, if anyone cares about them any more.  I got the impression that
>> they're superseded by the pvh stuff; is that right?
>
> Auto-translated PV seems to be one of those unsupported things that never
> quite dies. With PVH just round the corner, let's definitively call it dead.
> :)

I thought we discussed that we need this as backup for running older guests?

BTW, the auto-xlat is what PVH is advertising to the PV guest.

CC-ing Mukesh here

>> If that's the case, then let's commit to having a bigger 1-1 map on HVM
>> guests when the time comes to extend past 5TB, and remove this linear
>> map after all.
>
> I agree.
>
> -- Keir
>
>> Tim.
Keir Fraser
2012-Sep-13 21:14 UTC
Re: [PATCH] x86/mm: remove the linear mapping of the p2m tables
On 13/09/2012 21:43, "Konrad Rzeszutek Wilk" <konrad@kernel.org> wrote:

>> Auto-translated PV seems to be one of those unsupported things that never
>> quite dies. With PVH just round the corner, let's definitively call it dead.
>> :)
>
> I thought we discussed that we need this as backup for running older guests?

Don't think so, since we'll continue to run old guests as pure PV.

There was a backup for running on older CPUs, and that was to allow PVH
guests to run on shadow page tables. That's a totally separate compatibility
concern however.

> BTW, the auto-xlat is what PVH is advertising to the PV guest.

Yes, but it's auto-xlat in an HVM container. Pure PV auto-xlat is what we're
looking to kill here.

 -- Keir

> CC-ing Mukesh here
Konrad Rzeszutek Wilk
2012-Sep-14 20:31 UTC
Re: [PATCH] x86/mm: remove the linear mapping of the p2m tables
On Thu, Sep 13, 2012 at 5:14 PM, Keir Fraser <keir@xen.org> wrote:
> On 13/09/2012 21:43, "Konrad Rzeszutek Wilk" <konrad@kernel.org> wrote:
>
>>> Auto-translated PV seems to be one of those unsupported things that never
>>> quite dies. With PVH just round the corner, let's definitively call it dead.
>>> :)
>>
>> I thought we discussed that we need this as backup for running older guests?
>
> Don't think so, since we'll continue to run old guests as pure PV.
>
> There was a backup for running on older CPUs, and that was to allow PVH
> guests to run on shadow page tables. That's a totally separate compatibility
> concern however.

I am talking about the inverse. Running the "new" PV guests which do not
have PV MMU enabled in them (since the PV MMU calls would not be necessary
anymore) and running on non-NPT hardware.

For that PV auto-xlat would be necessary.

>> BTW, the auto-xlat is what PVH is advertising to the PV guest.
>
> Yes, but it's auto-xlat in an HVM container. Pure PV auto-xlat is what we're
> looking to kill here.
>
> -- Keir
>
>> CC-ing Mukesh here
Keir Fraser
2012-Sep-14 20:55 UTC
Re: [PATCH] x86/mm: remove the linear mapping of the p2m tables
On 14/09/2012 21:31, "Konrad Rzeszutek Wilk" <konrad@kernel.org> wrote:

>> Don't think so, since we'll continue to run old guests as pure PV.
>>
>> There was a backup for running on older CPUs, and that was to allow PVH
>> guests to run on shadow page tables. That's a totally separate compatibility
>> concern however.
>
> I am talking about the inverse. Running the "new" PV guests which do
> not have PV MMU enabled in them (since the PV MMU calls would not be
> necessary anymore) and running on non-NPT hardware.
>
> For that PV auto-xlat would be necessary.

These 'new' PV guests are PVH guests. We're talking about removing PV
auto-xlat, not disallowing PVH auto-xlat. Subtle difference, but PVH will
always run in an HVM container.

 -- Keir