Ian Campbell
2013-Dec-02 11:11 UTC
[PATCH] xen: arm: avoid truncation in mfn to paddr conversions
Although MFNs are 64-bit in the hypercall ABI they are most often unsigned
long internally, and are therefore 32-bit on arm32. Physical addresses are
always 64-bit via paddr_t.

This means that the common "mfn << PAGE_SHIFT" pattern risks losing some of
the top bits if the address is high enough. This need not imply a high amount
of RAM, just a sparse physical address map.

The correct form is ((paddr_t)mfn)<<PAGE_SHIFT and we have the pfn_to_paddr
macro which implements this. Grep for PAGE_SHIFT and << and switch to the
macro everywhere we can in the arch specific code. Note that page.h is
included by mm.h which defines the macro and so remains with the open coded
cast. I have inspected the common code matching this pattern and it uses the
correct casts where necessary (x86 also has pfn_to_paddr, so as a further
cleanup we could fix the common code too, but I haven't done that here).

I observed this as a failure to boot a guest on midway, due to trying to map a
foreign page which belonged to no guest. I think this likely explains the
crashes which Julien has seen too.

Signed-off-by: Ian Campbell <ian.campbell@citrix.com>
---
 xen/arch/arm/domain_build.c |    4 ++--
 xen/arch/arm/mm.c           |    4 ++--
 xen/arch/arm/p2m.c          |   16 +++++++++-------
 xen/arch/arm/setup.c        |    8 ++++----
 xen/include/asm-arm/mm.h    |    4 ++--
 5 files changed, 19 insertions(+), 17 deletions(-)

diff --git a/xen/arch/arm/domain_build.c b/xen/arch/arm/domain_build.c
index 90fe88e..6c2fd3c 100644
--- a/xen/arch/arm/domain_build.c
+++ b/xen/arch/arm/domain_build.c
@@ -84,8 +84,8 @@ static void allocate_memory_11(struct domain *d, struct kernel_info *kinfo)
         panic("Failed to allocate contiguous memory for dom0\n");
 
     spfn = page_to_mfn(pg);
-    start = spfn << PAGE_SHIFT;
-    size = (1 << order) << PAGE_SHIFT;
+    start = pfn_to_paddr(spfn);
+    size = pfn_to_paddr((1 << order));
 
     // 1:1 mapping
     printk("Populate P2M %#"PRIx64"->%#"PRIx64" (1:1 mapping for dom0)\n",
diff --git a/xen/arch/arm/mm.c b/xen/arch/arm/mm.c
index 2de7dc7..8189915 100644
--- a/xen/arch/arm/mm.c
+++ b/xen/arch/arm/mm.c
@@ -1032,10 +1032,10 @@ static int xenmem_add_to_physmap_one(
             return rc;
         }
 
-        maddr = p2m_lookup(od, idx << PAGE_SHIFT);
+        maddr = p2m_lookup(od, pfn_to_paddr(idx));
         if ( maddr == INVALID_PADDR )
         {
-            dump_p2m_lookup(od, idx << PAGE_SHIFT);
+            dump_p2m_lookup(od, pfn_to_paddr(idx));
             rcu_unlock_domain(od);
             return -EINVAL;
         }
diff --git a/xen/arch/arm/p2m.c b/xen/arch/arm/p2m.c
index af32511..1d5c841 100644
--- a/xen/arch/arm/p2m.c
+++ b/xen/arch/arm/p2m.c
@@ -296,18 +296,20 @@ int guest_physmap_add_page(struct domain *d,
                            unsigned long mfn,
                            unsigned int page_order)
 {
-    return create_p2m_entries(d, INSERT, gpfn << PAGE_SHIFT,
-                              (gpfn + (1<<page_order)) << PAGE_SHIFT,
-                              mfn << PAGE_SHIFT, MATTR_MEM);
+    return create_p2m_entries(d, INSERT,
+                              pfn_to_paddr(gpfn),
+                              pfn_to_paddr(gpfn + (1<<page_order)),
+                              pfn_to_paddr(mfn), MATTR_MEM);
 }
 
 void guest_physmap_remove_page(struct domain *d,
                                unsigned long gpfn,
                                unsigned long mfn, unsigned int page_order)
 {
-    create_p2m_entries(d, REMOVE, gpfn << PAGE_SHIFT,
-                       (gpfn + (1<<page_order)) << PAGE_SHIFT,
-                       mfn << PAGE_SHIFT, MATTR_MEM);
+    create_p2m_entries(d, REMOVE,
+                       pfn_to_paddr(gpfn),
+                       pfn_to_paddr(gpfn + (1<<page_order)),
+                       pfn_to_paddr(mfn), MATTR_MEM);
 }
 
 int p2m_alloc_table(struct domain *d)
@@ -450,7 +452,7 @@ err:
 
 unsigned long gmfn_to_mfn(struct domain *d, unsigned long gpfn)
 {
-    paddr_t p = p2m_lookup(d, gpfn << PAGE_SHIFT);
+    paddr_t p = p2m_lookup(d, pfn_to_paddr(gpfn));
     return p >> PAGE_SHIFT;
 }
 
diff --git a/xen/arch/arm/setup.c b/xen/arch/arm/setup.c
index 6834813..e640d81 100644
--- a/xen/arch/arm/setup.c
+++ b/xen/arch/arm/setup.c
@@ -456,7 +456,7 @@ static void __init setup_mm(unsigned long dtb_paddr, size_t dtb_size)
         {
             /* xenheap is always in the initial contiguous region */
             e = consider_modules(contig_start, contig_end,
-                                 xenheap_pages<<PAGE_SHIFT,
+                                 pfn_to_paddr(xenheap_pages),
                                  32<<20, 0);
             if ( e )
                 break;
@@ -470,7 +470,7 @@ static void __init setup_mm(unsigned long dtb_paddr, size_t dtb_size)
     domheap_pages = heap_pages - xenheap_pages;
 
     early_printk("Xen heap: %"PRIpaddr"-%"PRIpaddr" (%lu pages)\n",
-                 e - (xenheap_pages << PAGE_SHIFT), e,
+                 e - (pfn_to_paddr(xenheap_pages)), e,
                  xenheap_pages);
     early_printk("Dom heap: %lu pages\n", domheap_pages);
 
@@ -517,8 +517,8 @@ static void __init setup_mm(unsigned long dtb_paddr, size_t dtb_size)
             e = bank_end;
 
         /* Avoid the xenheap */
-        if ( s < ((xenheap_mfn_start+xenheap_pages) << PAGE_SHIFT)
-             && (xenheap_mfn_start << PAGE_SHIFT) < e )
+        if ( s < pfn_to_paddr(xenheap_mfn_start+xenheap_pages)
+             && pfn_to_paddr(xenheap_mfn_start) < e )
         {
             e = pfn_to_paddr(xenheap_mfn_start);
             n = pfn_to_paddr(xenheap_mfn_start+xenheap_pages);
diff --git a/xen/include/asm-arm/mm.h b/xen/include/asm-arm/mm.h
index ce66099..b8d4e7d 100644
--- a/xen/include/asm-arm/mm.h
+++ b/xen/include/asm-arm/mm.h
@@ -123,8 +123,8 @@ extern unsigned long xenheap_virt_end;
 #endif
 
 #define is_xen_fixed_mfn(mfn) \
-    ((((mfn) << PAGE_SHIFT) >= virt_to_maddr(&_start)) && \
-     (((mfn) << PAGE_SHIFT) <= virt_to_maddr(&_end)))
+    ((pfn_to_paddr(mfn) >= virt_to_maddr(&_start)) && \
+     (pfn_to_paddr(mfn) <= virt_to_maddr(&_end)))
 
 #define page_get_owner(_p)    (_p)->v.inuse.domain
 #define page_set_owner(_p,_d) ((_p)->v.inuse.domain = (_d))
-- 
1.7.10.4
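The truncation the patch describes can be reproduced outside Xen with a few
lines of C. The sketch below models the arm32 widths with fixed-width types;
the MFN value, the type names and the 4K PAGE_SHIFT are illustrative
assumptions, not taken from the thread or the Xen tree.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Model of the arm32 widths: "unsigned long" is 32-bit, paddr_t is 64-bit. */
typedef uint32_t mfn32_t;     /* stands in for arm32 unsigned long */
typedef uint64_t paddr_t;
#define PAGE_SHIFT 12

int main(void)
{
    mfn32_t mfn = 0x180000;   /* frame whose machine address (6GB) is above 32 bits */

    /* Open-coded pattern: the shift is performed in 32 bits, so the top
     * bits are already lost before the result is widened to paddr_t. */
    paddr_t truncated = mfn << PAGE_SHIFT;

    /* pfn_to_paddr() form: widen to paddr_t first, then shift. */
    paddr_t correct = ((paddr_t)mfn) << PAGE_SHIFT;

    printf("truncated: %#" PRIx64 "\n", truncated);  /* 0x80000000  */
    printf("correct:   %#" PRIx64 "\n", correct);    /* 0x180000000 */
    return 0;
}

On a 64-bit build unsigned long is already wide enough for the shift, which
is why the open-coded pattern only bites on arm32, and only when the physical
address map places RAM or MMIO above 4GB.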
Julien Grall
2013-Dec-02 12:54 UTC
Re: [PATCH] xen: arm: avoid truncation in mfn to paddr conversions
On 12/02/2013 11:11 AM, Ian Campbell wrote:
> Although MFNs are 64-bit in the hypercall ABI they are most often unsigned
> long internally, and are therefore 32-bit on arm32. Physical addresses are
> always 64-bit via paddr_t.
>
> This means that the common "mfn << PAGE_SHIFT" pattern risks losing some of
> the top bits if the address is high enough. This need not imply a high amount
> of RAM, just a sparse physical address map.
>
> The correct form is ((paddr_t)mfn)<<PAGE_SHIFT and we have the pfn_to_paddr
> macro which implements this. Grep for PAGE_SHIFT and << and switch to the
> macro everywhere we can in the arch specific code. Note that page.h is
> included by mm.h which defines the macro and so remains with the open coded
> cast. I have inspected the common code matching this pattern and it uses the
> correct casts where necessary (x86 also has pfn_to_paddr, so as a further
> cleanup we could fix the common code too, but I haven't done that here).
>
> I observed this as a failure to boot a guest on midway, due to trying to map a
> foreign page which belonged to no guest. I think this likely explains the
> crashes which Julien has seen too.

It fixes the crash on midway, thanks!

Acked-by: Julien Grall <julien.grall@linaro.org>

-- 
Julien Grall
Stefano Stabellini
2013-Dec-02 13:14 UTC
Re: [PATCH] xen: arm: avoid truncation in mfn to paddr conversions
On Mon, 2 Dec 2013, Ian Campbell wrote:
> Although MFNs are 64-bit in the hypercall ABI they are most often unsigned
> long internally, and are therefore 32-bit on arm32. Physical addresses are
> always 64-bit via paddr_t.
>
> This means that the common "mfn << PAGE_SHIFT" pattern risks losing some of
> the top bits if the address is high enough. This need not imply a high amount
> of RAM, just a sparse physical address map.
>
> The correct form is ((paddr_t)mfn)<<PAGE_SHIFT and we have the pfn_to_paddr
> macro which implements this. Grep for PAGE_SHIFT and << and switch to the
> macro everywhere we can in the arch specific code. Note that page.h is
> included by mm.h which defines the macro and so remains with the open coded
> cast. I have inspected the common code matching this pattern and it uses the
> correct casts where necessary (x86 also has pfn_to_paddr, so as a further
> cleanup we could fix the common code too, but I haven't done that here).
>
> I observed this as a failure to boot a guest on midway, due to trying to map a
> foreign page which belonged to no guest. I think this likely explains the
> crashes which Julien has seen too.
>
> Signed-off-by: Ian Campbell <ian.campbell@citrix.com>

Acked-by: Stefano Stabellini <stefano.stabellini@eu.citrix.com>
Ian Campbell
2013-Dec-02 14:11 UTC
Re: [PATCH] xen: arm: avoid truncation in mfn to paddr conversions
On Mon, 2013-12-02 at 13:14 +0000, Stefano Stabellini wrote:
> On Mon, 2 Dec 2013, Ian Campbell wrote:
> > Although MFNs are 64-bit in the hypercall ABI they are most often unsigned
> > long internally, and are therefore 32-bit on arm32. Physical addresses are
> > always 64-bit via paddr_t.
> >
> > This means that the common "mfn << PAGE_SHIFT" pattern risks losing some of
> > the top bits if the address is high enough. This need not imply a high amount
> > of RAM, just a sparse physical address map.
> >
> > The correct form is ((paddr_t)mfn)<<PAGE_SHIFT and we have the pfn_to_paddr
> > macro which implements this. Grep for PAGE_SHIFT and << and switch to the
> > macro everywhere we can in the arch specific code. Note that page.h is
> > included by mm.h which defines the macro and so remains with the open coded
> > cast. I have inspected the common code matching this pattern and it uses the
> > correct casts where necessary (x86 also has pfn_to_paddr, so as a further
> > cleanup we could fix the common code too, but I haven't done that here).
> >
> > I observed this as a failure to boot a guest on midway, due to trying to map a
> > foreign page which belonged to no guest. I think this likely explains the
> > crashes which Julien has seen too.
> >
> > Signed-off-by: Ian Campbell <ian.campbell@citrix.com>
>
> Acked-by: Stefano Stabellini <stefano.stabellini@eu.citrix.com>

Applied. I had to fix up a couple of rejects because I stupidly sent this
patch based on "improve handling of system with non-contiguous RAM regions".
They were trivially resolved though.

Ian.