search for: page_mask

Displaying 20 results from an estimated 490 matches for "page_mask".

2018 Apr 18
1
[PATCH 1/2] qxl: fix qxl_release_{map,unmap}
s/PAGE_SIZE/PAGE_MASK/ Luckily release_offset is never larger than PAGE_SIZE, so the bug has no bad side effects and managed to stay unnoticed for years that way ... Signed-off-by: Gerd Hoffmann <kraxel at redhat.com> --- drivers/gpu/drm/qxl/qxl_ioctl.c | 4 ++-- drivers/gpu/drm/qxl/qxl_release.c | 6 +++---...
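For reference, the two macros behave quite differently; a minimal standalone demonstration of the difference (standard 4 KiB definitions assumed, not the qxl driver code):

    #include <stdio.h>

    #define PAGE_SHIFT 12
    #define PAGE_SIZE  (1UL << PAGE_SHIFT)     /* 4096 */
    #define PAGE_MASK  (~(PAGE_SIZE - 1))

    int main(void)
    {
        unsigned long small = 0xa30;           /* below PAGE_SIZE          */
        unsigned long large = 0x1a30;          /* reaches the second page  */

        /* Correct: PAGE_MASK keeps the page base, ~PAGE_MASK the offset. */
        printf("%#lx %#lx\n", large & PAGE_MASK, large & ~PAGE_MASK);  /* 0x1000 0xa30 */

        /* The typo: PAGE_SIZE has only a single bit set, so for any offset
         * below 4096 the result is 0 -- which is why the bug stayed harmless. */
        printf("%#lx %#lx\n", small & PAGE_SIZE, large & PAGE_SIZE);   /* 0 0x1000 */
        return 0;
    }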
2012 Nov 30
1
[PATCH v2] arm: add few checks to gic_init
...%"PRIpaddr"\n" + " gic_vcpu_addr=%"PRIpaddr"\n", + early_info.gic.gic_dist_addr, early_info.gic.gic_cpu_addr, + early_info.gic.gic_hyp_addr, early_info.gic.gic_vcpu_addr); + if ( (early_info.gic.gic_dist_addr & ~PAGE_MASK) || + (early_info.gic.gic_cpu_addr & ~PAGE_MASK) || + (early_info.gic.gic_hyp_addr & ~PAGE_MASK) || + (early_info.gic.gic_vcpu_addr & ~PAGE_MASK) ) + panic("error: GIC interfaces not page aligned.\n"); + gic.dbase = early_info.gic.gic_dist_a...
2005 Mar 14
4
[patch/unstable] page table cleanups
...spl1e = (l1_pgentry_t *)map_domain_mem(smfn << PAGE_SHIFT); + spl1e = map_domain_mem(smfn << PAGE_SHIFT); *prev_smfn_ptr = smfn; *prev_spl1e_ptr = spl1e; } l1pte_propagate_from_guest(current->domain, &gpte, &spte); - spl1e[(pa & ~PAGE_MASK) / sizeof(l1_pgentry_t)] = mk_l1_pgentry(spte); + spl1e[(pa & ~PAGE_MASK) / sizeof(l1_pgentry_t)] = spte; } -void shadow_l2_normal_pt_update(unsigned long pa, unsigned long gpde) +void shadow_l2_normal_pt_update(unsigned long pa, l2_pgentry_t gpde) { - unsigned long sl2mfn, spde = 0;...
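The spl1e indexing idiom in isolation (hypothetical names; 4 KiB pages and 8-byte entries assumed):

    #define PAGE_MASK (~(4096UL - 1))

    typedef unsigned long pt_entry_t;          /* stand-in for l1_pgentry_t */

    /* Index of the entry, within the page-sized table, that a write to
     * physical address pa touched: in-page byte offset / entry size. */
    static inline unsigned int table_index(unsigned long pa)
    {
        return (pa & ~PAGE_MASK) / sizeof(pt_entry_t);     /* 0 .. 511 */
    }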
2009 Aug 06
2
[PATCH] hvm emul: fix cmpxchg emulation to use an atomic operation
..._to_mfn(v, vaddr); + if ( !mfn_valid(mfn_x(mfn1)) ) + return ((mfn_x(mfn1) == BAD_GVA_TO_GFN) ? + MAPPING_EXCEPTION : + (mfn_x(mfn1) == READONLY_GFN) ? + MAPPING_SILENT_FAIL : MAPPING_UNHANDLEABLE); + + if ( likely(((vaddr + bytes - 1) & PAGE_MASK) == (vaddr & PAGE_MASK)) ) + { + /* Whole write fits on a single page */ + mfn2 = _mfn(INVALID_MFN); + map = map_domain_page(mfn_x(mfn1)) + (vaddr & ~PAGE_MASK); + } + else + { + /* Cross-page emulated writes are only supported for HVM guests; +...
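The single-page test the emulator relies on, as a standalone predicate (constants assumed): an access fits in one page exactly when its first and last byte share the same page base. A 4-byte cmpxchg at page offset 0xffe, for instance, spills into the next page and takes the cross-page path.

    #define PAGE_MASK (~(4096UL - 1))

    /* True when the bytes-long access starting at vaddr stays within one page. */
    static inline int fits_in_one_page(unsigned long vaddr, unsigned int bytes)
    {
        return ((vaddr + bytes - 1) & PAGE_MASK) == (vaddr & PAGE_MASK);
    }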
2019 Aug 13
1
[RFC PATCH v6 75/92] kvm: x86: disable gpa_available optimization in emulator_read_write_onepage()
...* operation using rep will only have the initial GPA from the NPF > * occurred. > */ > - if (vcpu->arch.gpa_available && > + if (vcpu->arch.gpa_available && !kvmi_is_present() && > emulator_can_use_gpa(ctxt) && > (addr & ~PAGE_MASK) == (vcpu->arch.gpa_val & ~PAGE_MASK)) { > gpa = vcpu->arch.gpa_val; >
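The guard being weakened here compares in-page offsets only; standalone, with hypothetical names:

    #define PAGE_MASK (~(4096UL - 1))

    /* The cached GPA is only reusable if its offset within the page matches
     * the offset of the linear address being accessed. */
    static inline int same_page_offset(unsigned long gva, unsigned long gpa)
    {
        return (gva & ~PAGE_MASK) == (gpa & ~PAGE_MASK);
    }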
2017 Nov 27
0
[PATCH] x86/mm/kmmio: Fix mmiotrace for page unaligned addresses
...--git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c index 34f0e1847dd6..5d4c358778dd 100644 --- a/arch/x86/mm/ioremap.c +++ b/arch/x86/mm/ioremap.c @@ -349,11 +349,11 @@ void iounmap(volatile void __iomem *addr) return; } + mmiotrace_iounmap(addr); + addr = (volatile void __iomem *) (PAGE_MASK & (unsigned long __force)addr); - mmiotrace_iounmap(addr); - /* Use the vm area unlocked, assuming the caller ensures there isn't another iounmap for the same address in parallel. Reuse of the virtual address is prevented by diff --git a/arch/x86/mm/kmmio.c b/arch/x86/mm/kmmio...
2018 Feb 23
0
[PATCH 4.14 148/159] x86/mm/kmmio: Fix mmiotrace for page unaligned addresses
...- arch/x86/mm/kmmio.c | 12 +++++++----- 2 files changed, 9 insertions(+), 7 deletions(-) --- a/arch/x86/mm/ioremap.c +++ b/arch/x86/mm/ioremap.c @@ -349,11 +349,11 @@ void iounmap(volatile void __iomem *addr return; } + mmiotrace_iounmap(addr); + addr = (volatile void __iomem *) (PAGE_MASK & (unsigned long __force)addr); - mmiotrace_iounmap(addr); - /* Use the vm area unlocked, assuming the caller ensures there isn't another iounmap for the same address in parallel. Reuse of the virtual address is prevented by --- a/arch/x86/mm/kmmio.c +++ b/arch/x86/mm/kmmio.c...
2018 Feb 23
0
[PATCH 3.18 54/58] x86/mm/kmmio: Fix mmiotrace for page unaligned addresses
...ed, 9 insertions(+), 7 deletions(-) --- a/arch/x86/mm/ioremap.c +++ b/arch/x86/mm/ioremap.c @@ -296,11 +296,11 @@ void iounmap(volatile void __iomem *addr (void __force *)addr < phys_to_virt(ISA_END_ADDRESS)) return; + mmiotrace_iounmap(addr); + addr = (volatile void __iomem *) (PAGE_MASK & (unsigned long __force)addr); - mmiotrace_iounmap(addr); - /* Use the vm area unlocked, assuming the caller ensures there isn't another iounmap for the same address in parallel. Reuse of the virtual address is prevented by --- a/arch/x86/mm/kmmio.c +++ b/arch/x86/mm/kmmio.c...
2018 Feb 23
0
[PATCH 4.4 059/193] x86/mm/kmmio: Fix mmiotrace for page unaligned addresses
...ed, 9 insertions(+), 7 deletions(-) --- a/arch/x86/mm/ioremap.c +++ b/arch/x86/mm/ioremap.c @@ -348,11 +348,11 @@ void iounmap(volatile void __iomem *addr (void __force *)addr < phys_to_virt(ISA_END_ADDRESS)) return; + mmiotrace_iounmap(addr); + addr = (volatile void __iomem *) (PAGE_MASK & (unsigned long __force)addr); - mmiotrace_iounmap(addr); - /* Use the vm area unlocked, assuming the caller ensures there isn't another iounmap for the same address in parallel. Reuse of the virtual address is prevented by --- a/arch/x86/mm/kmmio.c +++ b/arch/x86/mm/kmmio.c...
2018 Feb 23
0
[PATCH 4.9 083/145] x86/mm/kmmio: Fix mmiotrace for page unaligned addresses
...ed, 9 insertions(+), 7 deletions(-) --- a/arch/x86/mm/ioremap.c +++ b/arch/x86/mm/ioremap.c @@ -347,11 +347,11 @@ void iounmap(volatile void __iomem *addr (void __force *)addr < phys_to_virt(ISA_END_ADDRESS)) return; + mmiotrace_iounmap(addr); + addr = (volatile void __iomem *) (PAGE_MASK & (unsigned long __force)addr); - mmiotrace_iounmap(addr); - /* Use the vm area unlocked, assuming the caller ensures there isn't another iounmap for the same address in parallel. Reuse of the virtual address is prevented by --- a/arch/x86/mm/kmmio.c +++ b/arch/x86/mm/kmmio.c...
2016 Mar 03
1
RFC: [PATCH] x86/kmmio: fix mmiotrace for hugepages
...read lock. */ -static struct kmmio_fault_page *get_kmmio_fault_page(unsigned long page) +static struct kmmio_fault_page *get_kmmio_fault_page(unsigned long addr) { struct list_head *head; struct kmmio_fault_page *f; + unsigned int l; + pte_t *pte = lookup_address(addr, &l); - page &= PAGE_MASK; - head = kmmio_page_list(page); + if (!pte) + return NULL; + addr &= page_level_mask(l); + head = kmmio_page_list(addr); list_for_each_entry_rcu(f, head, list) { - if (f->page == page) + if (f->addr == addr) return f; } return NULL; @@ -137,10 +148,10 @@ static void clear_pt...
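The core idea of the fix, sketched standalone: the rounding mask depends on the mapping size, so a plain PAGE_MASK is only correct for 4 KiB mappings. In the kernel the level comes from lookup_address() and the mask from page_level_mask(); the enum below is a simplified stand-in.

    enum pg_level { PG_LEVEL_4K, PG_LEVEL_2M, PG_LEVEL_1G };

    /* Round addr down to the base of the page backing it, whatever its size. */
    static unsigned long page_base(unsigned long addr, enum pg_level level)
    {
        switch (level) {
        case PG_LEVEL_1G: return addr & ~((1UL << 30) - 1);
        case PG_LEVEL_2M: return addr & ~((1UL << 21) - 1);
        default:          return addr & ~((1UL << 12) - 1);
        }
    }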
2013 Dec 04
5
[PATCH] coverity: Store the modelling file in the source tree.
...signifies that the + * variable could be either NULL or have some data. + * + * Coverity Scan doesn't pick up modifications automatically. The model file + * must be uploaded by an admin in the analysis. + */ + +/* Definitions */ +#define NULL (void *)0 +#define PAGE_SIZE 4096UL +#define PAGE_MASK (~(PAGE_SIZE-1)) + +#define assert(cond) /* empty */ +#define page_to_mfn(p) (unsigned long)(p) + +struct page_info {}; + +/* + * map_domain_page() takes an existing domain page and possibly maps it into + * the Xen pagetables, to allow for direct access. Model this as a memory + * allocation of e...
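With those two definitions, the usual address manipulations come out as follows (a small standalone demo, not part of the model file):

    #include <stdio.h>

    #define PAGE_SIZE 4096UL
    #define PAGE_MASK (~(PAGE_SIZE - 1))

    int main(void)
    {
        unsigned long addr = 0x12345;

        printf("round down: %#lx\n", addr & PAGE_MASK);                    /* 0x12000 */
        printf("round up:   %#lx\n", (addr + PAGE_SIZE - 1) & PAGE_MASK);  /* 0x13000 */
        printf("offset:     %#lx\n", addr & ~PAGE_MASK);                   /* 0x345   */
        return 0;
    }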
2018 Jan 28
0
[PATCH AUTOSEL for 4.14 095/100] x86/mm/kmmio: Fix mmiotrace for page unaligned addresses
...--git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c index 34f0e1847dd6..5d4c358778dd 100644 --- a/arch/x86/mm/ioremap.c +++ b/arch/x86/mm/ioremap.c @@ -349,11 +349,11 @@ void iounmap(volatile void __iomem *addr) return; } + mmiotrace_iounmap(addr); + addr = (volatile void __iomem *) (PAGE_MASK & (unsigned long __force)addr); - mmiotrace_iounmap(addr); - /* Use the vm area unlocked, assuming the caller ensures there isn't another iounmap for the same address in parallel. Reuse of the virtual address is prevented by diff --git a/arch/x86/mm/kmmio.c b/arch/x86/mm/kmmio...
2018 Jan 28
0
[PATCH AUTOSEL for 4.4 34/36] x86/mm/kmmio: Fix mmiotrace for page unaligned addresses
...x b9c78f3bcd67..53ab3f367472 100644 --- a/arch/x86/mm/ioremap.c +++ b/arch/x86/mm/ioremap.c @@ -348,11 +348,11 @@ void iounmap(volatile void __iomem *addr) (void __force *)addr < phys_to_virt(ISA_END_ADDRESS)) return; + mmiotrace_iounmap(addr); + addr = (volatile void __iomem *) (PAGE_MASK & (unsigned long __force)addr); - mmiotrace_iounmap(addr); - /* Use the vm area unlocked, assuming the caller ensures there isn't another iounmap for the same address in parallel. Reuse of the virtual address is prevented by diff --git a/arch/x86/mm/kmmio.c b/arch/x86/mm/kmmio...
2018 Jan 28
0
[PATCH AUTOSEL for 4.9 46/49] x86/mm/kmmio: Fix mmiotrace for page unaligned addresses
...x 7aaa2635862d..ecae9ac216fa 100644 --- a/arch/x86/mm/ioremap.c +++ b/arch/x86/mm/ioremap.c @@ -347,11 +347,11 @@ void iounmap(volatile void __iomem *addr) (void __force *)addr < phys_to_virt(ISA_END_ADDRESS)) return; + mmiotrace_iounmap(addr); + addr = (volatile void __iomem *) (PAGE_MASK & (unsigned long __force)addr); - mmiotrace_iounmap(addr); - /* Use the vm area unlocked, assuming the caller ensures there isn't another iounmap for the same address in parallel. Reuse of the virtual address is prevented by diff --git a/arch/x86/mm/kmmio.c b/arch/x86/mm/kmmio...
2018 Jan 28
0
[PATCH AUTOSEL for 3.18 23/25] x86/mm/kmmio: Fix mmiotrace for page unaligned addresses
...x af78e50ca6ce..c76ea35fa19f 100644 --- a/arch/x86/mm/ioremap.c +++ b/arch/x86/mm/ioremap.c @@ -296,11 +296,11 @@ void iounmap(volatile void __iomem *addr) (void __force *)addr < phys_to_virt(ISA_END_ADDRESS)) return; + mmiotrace_iounmap(addr); + addr = (volatile void __iomem *) (PAGE_MASK & (unsigned long __force)addr); - mmiotrace_iounmap(addr); - /* Use the vm area unlocked, assuming the caller ensures there isn't another iounmap for the same address in parallel. Reuse of the virtual address is prevented by diff --git a/arch/x86/mm/kmmio.c b/arch/x86/mm/kmmio...
2020 Aug 27
2
[PATCH] nouveau: fix the start/end range for migration
...@@ nouveau_svmm_bind(struct drm_device *dev, void *data, struct nouveau_cli *cli = nouveau_cli(file_priv); struct drm_nouveau_svm_bind *args = data; unsigned target, cmd, priority; - unsigned long addr, end, size; + unsigned long addr, end; struct mm_struct *mm; args->va_start &= PAGE_MASK; - args->va_end &= PAGE_MASK; + args->va_end = ALIGN(args->va_end, PAGE_SIZE); + /* If no end address is given, assume a single page. */ + if (args->va_end == 0) + args->va_end = args->va_start + PAGE_SIZE; /* Sanity check arguments */ if (args->reserved0 || args->...
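The corrected rounding, reduced to a standalone sketch (the ALIGN macro is spelled out; the helper name is illustrative, not the driver's):

    #define PAGE_SIZE   4096UL
    #define PAGE_MASK   (~(PAGE_SIZE - 1))
    #define ALIGN(x, a) (((x) + (a) - 1) & ~((a) - 1))

    static void clamp_range(unsigned long *start, unsigned long *end)
    {
        *start &= PAGE_MASK;                   /* round the start down    */
        *end = ALIGN(*end, PAGE_SIZE);         /* round the end up        */
        if (*end == 0)                         /* no end given: one page  */
            *end = *start + PAGE_SIZE;
    }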
2015 Dec 07
0
[PATCH RFC 1/3] xen: export xen_phys_to_bus, xen_bus_to_phys and xen_virt_to_bus
..._addr_t is 64bit leading to a loss in - * information if the shift is done before casting to 64bit. - */ -static inline dma_addr_t xen_phys_to_bus(phys_addr_t paddr) -{ - unsigned long bfn = pfn_to_bfn(PFN_DOWN(paddr)); - dma_addr_t dma = (dma_addr_t)bfn << PAGE_SHIFT; - - dma |= paddr & ~PAGE_MASK; - - return dma; -} - -static inline phys_addr_t xen_bus_to_phys(dma_addr_t baddr) -{ - unsigned long pfn = bfn_to_pfn(PFN_DOWN(baddr)); - dma_addr_t dma = (dma_addr_t)pfn << PAGE_SHIFT; - phys_addr_t paddr = dma; - - paddr |= baddr & ~PAGE_MASK; - - return paddr; -} - -static inline dma_...
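A toy version of the frame/offset split these helpers perform, with an identity stand-in for pfn_to_bfn(); the cast before the shift is exactly the 32-bit loss-of-information concern the removed comment describes:

    #include <stdint.h>

    #define PAGE_SHIFT 12
    #define PAGE_SIZE  (1UL << PAGE_SHIFT)
    #define PAGE_MASK  (~(PAGE_SIZE - 1))

    typedef uint64_t dma_addr_t;
    typedef uint64_t phys_addr_t;

    /* Placeholder for pfn_to_bfn(): identity mapping, for illustration only. */
    static unsigned long pfn_to_bfn_stub(unsigned long pfn) { return pfn; }

    static dma_addr_t phys_to_bus(phys_addr_t paddr)
    {
        unsigned long bfn = pfn_to_bfn_stub(paddr >> PAGE_SHIFT);

        /* Widen before shifting so no frame bits are lost, then re-attach
         * the in-page offset. */
        return ((dma_addr_t)bfn << PAGE_SHIFT) | (paddr & ~PAGE_MASK);
    }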
2016 Mar 15
2
[PATCH v1 11/19] zsmalloc: squeeze freelist into page->mapping
...L; > - } > + ofs = obj_idx * class->size; > + cursor = first_page; > + nr_page = ofs >> PAGE_SHIFT; > > - obj = page_to_pfn(page) << OBJ_INDEX_BITS; > - obj |= ((obj_idx) & OBJ_INDEX_MASK); > - obj <<= OBJ_TAG_BITS; > + *ofs_in_page = ofs & ~PAGE_MASK; > + > + for (i = 0; i < nr_page; i++) > + cursor = get_next_page(cursor); > > - return (void *)obj; > + *obj_page = cursor; > }
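The arithmetic in this hunk, as a standalone helper with hypothetical names: the byte offset of an object inside its zspage is split into a page index and an offset within that page.

    #define PAGE_SHIFT 12
    #define PAGE_MASK  (~((1UL << PAGE_SHIFT) - 1))

    /* For object obj_idx of a size class with obj_size bytes per object,
     * return the index of the page holding it and the offset inside it. */
    static void locate_obj(unsigned long obj_idx, unsigned long obj_size,
                           unsigned long *page_idx, unsigned long *ofs_in_page)
    {
        unsigned long ofs = obj_idx * obj_size;

        *page_idx = ofs >> PAGE_SHIFT;
        *ofs_in_page = ofs & ~PAGE_MASK;
    }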
2016 Mar 15
2
[PATCH v1 11/19] zsmalloc: squeeze freelist into page->mapping
...L; > - } > + ofs = obj_idx * class->size; > + cursor = first_page; > + nr_page = ofs >> PAGE_SHIFT; > > - obj = page_to_pfn(page) << OBJ_INDEX_BITS; > - obj |= ((obj_idx) & OBJ_INDEX_MASK); > - obj <<= OBJ_TAG_BITS; > + *ofs_in_page = ofs & ~PAGE_MASK; > + > + for (i = 0; i < nr_page; i++) > + cursor = get_next_page(cursor); > > - return (void *)obj; > + *obj_page = cursor; > }