search for: pfn_pte

Displaying 20 results from an estimated 109 matches for "pfn_pte".

2009 Sep 21
1
[PATCH 2/5] lguest: use set_pte/set_pmd uniformly for real page table entries
...CHER_PMD_INDEX], switcher_pmd); #else pgd_t switcher_pgd; @@ -1179,10 +1176,8 @@ void map_switcher_in_guest(struct lg_cpu * page is already mapped there, we don't have to copy them out * again. */ - pfn = __pa(cpu->regs_page) >> PAGE_SHIFT; - native_set_pte(&regs_pte, pfn_pte(pfn, PAGE_KERNEL)); - native_set_pte(&switcher_pte_page[pte_index((unsigned long)pages)], - regs_pte); + regs_pte = pfn_pte(__pa(cpu->regs_page) >> PAGE_SHIFT, PAGE_KERNEL); + set_pte(&switcher_pte_page[pte_index((unsigned long)pages)], regs_pte); } /*:*/ @@ -1209,7 +1204,7 @...
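
The pattern this patch converges on is easy to miss in the flattened excerpt. A minimal sketch of the same idea, assuming host-kernel context and stand-in names (regs_page, switcher_pte_page) for lguest's own structures:

    #include <linux/mm.h>
    #include <asm/pgtable.h>

    /* Build a PTE for a kernel-owned page and install it in one slot of the
     * Switcher's PTE page. pfn_pte() combines a page frame number with
     * protection bits; set_pte() writes the entry through the normal helper
     * rather than the native_* variants the old code used. */
    static void map_regs_page(void *regs_page, pte_t *switcher_pte_page,
                              unsigned long switcher_vaddr)
    {
            pte_t regs_pte = pfn_pte(__pa(regs_page) >> PAGE_SHIFT, PAGE_KERNEL);

            /* pte_index() selects the entry covering switcher_vaddr. */
            set_pte(&switcher_pte_page[pte_index(switcher_vaddr)], regs_pte);
    }
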
2005 Jun 23
1
[patch] pin/unpin must flush tlb
...86/mm/pgtable.c 2005-06-22 16:25:17.000000000 +0200 +++ linux-2.6.11/arch/xen/i386/mm/pgtable.c 2005-06-23 18:20:45.000000000 +0200 @@ -486,7 +486,8 @@ void mm_pin(struct mm_struct *mm) mm_walk(mm, PAGE_KERNEL_RO); HYPERVISOR_update_va_mapping( (unsigned long)mm->pgd, - pfn_pte(virt_to_phys(mm->pgd)>>PAGE_SHIFT, PAGE_KERNEL_RO), 0); + pfn_pte(virt_to_phys(mm->pgd)>>PAGE_SHIFT, PAGE_KERNEL_RO), + UVMF_TLB_FLUSH); xen_pgd_pin(__pa(mm->pgd)); mm->context.pinned = 1; spin_lock(&mm_unpinned_lock); @@ -505,6 +506,7 @@ void mm_un...
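
The whole fix is the flags argument. A sketch of the intent inside mm_pin(), assuming the Xen-patched 2.6.11 i386 tree the diff is against:

    /* Remap the pgd read-only before pinning it, and have Xen flush the TLB
     * in the same hypercall (UVMF_TLB_FLUSH) so no stale writable
     * translation of the pgd survives. Passing 0, as the old code did,
     * skipped that flush. */
    HYPERVISOR_update_va_mapping((unsigned long)mm->pgd,
                                 pfn_pte(virt_to_phys(mm->pgd) >> PAGE_SHIFT,
                                         PAGE_KERNEL_RO),
                                 UVMF_TLB_FLUSH);
    xen_pgd_pin(__pa(mm->pgd));
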
2007 Apr 18
1
[PATCH 1/5] Add pagetable allocation notifiers
...@ -52,8 +52,9 @@ static struct page *split_large_page(uns address = __pa(address); addr = address & LARGE_PAGE_MASK; pbase = (pte_t *)page_address(base); + SetPagePTE(virt_to_page(pbase)); for (i = 0; i < PTRS_PER_PTE; i++, addr += PAGE_SIZE) { - set_pte(&pbase[i], pfn_pte(addr >> PAGE_SHIFT, + set_pte(&pbase[i], pfn_pte(addr >> PAGE_SHIFT, addr == address ? prot : PAGE_KERNEL)); } return base; @@ -146,6 +147,7 @@ __change_page_attr(struct page *page, pg BUG_ON(!page_count(kpte_page)); if (cpu_ha...
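
What the loop in split_large_page() does, reflowed as a sketch; pbase, addr, address and prot follow the quoted code, and SetPagePTE() is the notifier hook this patch adds:

    /* Fill a freshly allocated PTE page so that, entry by entry, it maps the
     * same physical range the old large page covered. Only the page being
     * changed gets the new protection; every other entry keeps PAGE_KERNEL. */
    SetPagePTE(virt_to_page(pbase));
    for (i = 0; i < PTRS_PER_PTE; i++, addr += PAGE_SIZE)
            set_pte(&pbase[i], pfn_pte(addr >> PAGE_SHIFT,
                                       addr == address ? prot : PAGE_KERNEL));
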
2007 Apr 18
0
[PATCH 1/2] Whitespace cleanup in pageattr.c
...truct page *split_large_page(unsigned long address, pgprot_t prot) @@ -54,8 +54,8 @@ static struct page *split_large_page(uns pbase = (pte_t *)page_address(base); SetPagePTE(virt_to_page(pbase)); for (i = 0; i < PTRS_PER_PTE; i++, addr += PAGE_SIZE) { - set_pte(&pbase[i], pfn_pte(addr >> PAGE_SHIFT, - addr == address ? prot : PAGE_KERNEL)); + set_pte(&pbase[i], pfn_pte(addr >> PAGE_SHIFT, + addr == address ? prot : PAGE_KERNEL)); } return base; }
2013 Dec 04
5
[PATCH] arm: xen: foreign mapping PTEs are special.
...--- a/arch/arm/xen/enlighten.c +++ b/arch/arm/xen/enlighten.c @@ -96,7 +96,7 @@ static int remap_pte_fn(pte_t *ptep, pgtable_t token, unsigned long addr, struct remap_data *info = data; struct page *page = info->pages[info->index++]; unsigned long pfn = page_to_pfn(page); - pte_t pte = pfn_pte(pfn, info->prot); + pte_t pte = pte_mkspecial(pfn_pte(pfn, info->prot)); if (map_foreign_page(pfn, info->fgmfn, info->domid)) return -EFAULT; -- 1.7.10.4
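
A one-line change, but the reasoning deserves spelling out. A sketch of the new PTE construction, assuming the remap_data layout used in enlighten.c as quoted:

    /* Foreign, grant-mapped frames are not ordinary struct-page backed
     * memory from this kernel's point of view, so the entry is marked
     * "special": vm_normal_page() then skips it and the VM never takes
     * references on a page this domain does not own. The surrounding
     * callback (mapping the foreign frame, installing the PTE) is as in
     * the excerpt above. */
    pte_t pte = pte_mkspecial(pfn_pte(page_to_pfn(page), info->prot));
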
2007 Jan 10
1
[PATCH] linux/i386: allow CONFIG_HIGHPTE on i386 (take 2)
...set_page_count(pte, 1); } -#endif return pte; } void pte_free(struct page *pte) { - unsigned long va = (unsigned long)__va(page_to_pfn(pte)<<PAGE_SHIFT); + unsigned long pfn = page_to_pfn(pte); - if (!pte_write(*virt_to_ptep(va))) - BUG_ON(HYPERVISOR_update_va_mapping( - va, pfn_pte(page_to_pfn(pte), PAGE_KERNEL), 0)); + if (!PageHighMem(pte)) { + unsigned long va = (unsigned long)__va(pfn << PAGE_SHIFT); + + if (!pte_write(*virt_to_ptep(va))) + BUG_ON(HYPERVISOR_update_va_mapping( + va, pfn_pte(pfn, PAGE_KERNEL), 0)); + } else { + struct mmuext_op op; + +...
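
The shape of the change in pte_free(), reconstructed from the excerpt; the highmem branch's mmuext_op details are cut off above, so they are only indicated here:

    /* A pte page in lowmem has a permanent kernel mapping, so __va() is
     * valid and the page can be made writable again through
     * HYPERVISOR_update_va_mapping() before it is freed. A pte page in
     * highmem has no such mapping, so it must be unpinned through an
     * mmuext_op instead. */
    unsigned long pfn = page_to_pfn(pte);

    if (!PageHighMem(pte)) {
            unsigned long va = (unsigned long)__va(pfn << PAGE_SHIFT);

            if (!pte_write(*virt_to_ptep(va)))
                    BUG_ON(HYPERVISOR_update_va_mapping(
                            va, pfn_pte(pfn, PAGE_KERNEL), 0));
    } else {
            /* MMUEXT unpin of the highmem pte page (elided in the excerpt). */
    }
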
2009 Apr 16
1
NULL pointer dereference at __switch_to() ( __unlazy_fpu ) with lguest PAE patch
.... When we're running the Guest, * we want the Guest's "regs" page to appear where the first Switcher @@ -727,7 +1058,8 @@ void map_switcher_in_guest(struct lg_cpu *cpu, struct lguest_pages *pages) * again. */ pfn = __pa(cpu->regs_page) >> PAGE_SHIFT; regs_pte = pfn_pte(pfn, __pgprot(__PAGE_KERNEL)); - switcher_pte_page[(unsigned long)pages/PAGE_SIZE%PTRS_PER_PTE] = regs_pte; + switcher_pte_page[(unsigned long)pages / PAGE_SIZE % PTRS_PER_PTE] + = regs_pte; } /*:*/ @@ -752,21 +1084,23 @@ static __init void populate_switcher_pte_page(unsigned int cpu,...
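
The index expression the hunk merely re-wraps is the interesting part. A sketch of what it computes, following the pre-PAE lguest code quoted above:

    /* "pages" is the Switcher virtual address for this CPU. Dividing by
     * PAGE_SIZE gives its page number; "% PTRS_PER_PTE" reduces that to an
     * offset within a single pte page, the same value pte_index(pages)
     * would give. The Guest-visible regs page is then mapped at that slot. */
    pfn = __pa(cpu->regs_page) >> PAGE_SHIFT;
    regs_pte = pfn_pte(pfn, __pgprot(__PAGE_KERNEL));
    switcher_pte_page[(unsigned long)pages / PAGE_SIZE % PTRS_PER_PTE] = regs_pte;
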
2007 Apr 18
0
[RFC PATCH 31/35] Add Xen grant table support
> > +#ifndef __ia64__ > > +static int map_pte_fn(pte_t *pte, struct page *pte_page, > > + unsigned long addr, void *data) > > +{ > > + unsigned long **frames = (unsigned long **)data; > > + > > + set_pte_at(&init_mm, addr, pte, pfn_pte((*frames)[0], > PAGE_KERNEL)); > > + (*frames)++; > > + return 0; > > +} > > looks to me the wrong ifdef for a file in arch/i386... please fix FYI, the grant table support is also used by non-x86 Xen architectures (currently ia64 and soon ppc) so grant table files (alo...
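
The callback under review, restated as a sketch; the signature matches the apply_to_page_range() callback of that era as quoted, not any current kernel:

    /* Called once per PTE slot of the virtual range reserved for the grant
     * table: install the next frame from the list with kernel protections
     * and advance the cursor. */
    static int map_pte_fn(pte_t *pte, struct page *pte_page,
                          unsigned long addr, void *data)
    {
            unsigned long **frames = data;

            set_pte_at(&init_mm, addr, pte, pfn_pte((*frames)[0], PAGE_KERNEL));
            (*frames)++;
            return 0;
    }
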
2007 Apr 18
0
[PATCH 5/9] 00mm6 kpte flush.patch
..._flush_tlb_one(vaddr); -#endif + kpte_clear_flush(kmap_pte-idx, vaddr); dec_preempt_count(); preempt_check_resched(); @@ -94,7 +91,6 @@ void *kmap_atomic_pfn(unsigned long pfn, idx = type + KM_TYPE_NR*smp_processor_id(); vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx); set_pte(kmap_pte-idx, pfn_pte(pfn, kmap_prot)); - __flush_tlb_one(vaddr); return (void*) vaddr; } =================================================================== --- a/include/asm-i386/pgtable.h +++ b/include/asm-i386/pgtable.h @@ -441,6 +441,13 @@ extern pte_t *lookup_address(unsigned lo #define pte_unmap_nested(pte)...
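
What the touched kmap_atomic_pfn() body does, reflowed with the patch's point noted; names follow the i386 code of that era:

    /* Map an arbitrary pfn at a per-CPU fixmap slot: pick the slot for this
     * kmap type and CPU, point its kernel PTE at the pfn, and hand back the
     * fixed virtual address. The patch drops the explicit __flush_tlb_one()
     * here because the flush now happens in kpte_clear_flush() on unmap. */
    idx = type + KM_TYPE_NR * smp_processor_id();
    vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
    set_pte(kmap_pte - idx, pfn_pte(pfn, kmap_prot));
    return (void *)vaddr;
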
2009 Jun 05
1
[PATCH] lguest: PAE support
...CPU is. This is an optimization: when the Switcher @@ -726,8 +1001,9 @@ void map_switcher_in_guest(struct lg_cpu *cpu, struct lguest_pages *pages) * page is already mapped there, we don't have to copy them out * again. */ pfn = __pa(cpu->regs_page) >> PAGE_SHIFT; - regs_pte = pfn_pte(pfn, __pgprot(__PAGE_KERNEL)); - switcher_pte_page[(unsigned long)pages/PAGE_SIZE%PTRS_PER_PTE] = regs_pte; + native_set_pte(&regs_pte, pfn_pte(pfn, PAGE_KERNEL)); + native_set_pte(&switcher_pte_page[pte_index((unsigned long)pages)], + regs_pte); } /*:*/ -- 1.6.0.4
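
The substitution worth noting is plain assignment giving way to native_set_pte(). A short sketch, assuming the 32-bit PAE build this patch targets:

    /* Under PAE a pte_t is two 32-bit words, so "*ptep = pte" is not a
     * single store; native_set_pte() writes the entry through the arch
     * helper. (The later patch at the top of this listing switches these
     * call sites to plain set_pte().) */
    native_set_pte(&regs_pte, pfn_pte(pfn, PAGE_KERNEL));
    native_set_pte(&switcher_pte_page[pte_index((unsigned long)pages)],
                   regs_pte);
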
2007 Oct 31
5
[PATCH 0/7] (Re-)introducing pvops for x86_64 - Real pvops work part
Hey folks, This is the part-of-pvops-implementation-that-is-not-exactly-a-merge. Neat, huh? This is the majority of the work. The first patch in the series does not really belong here. It was already sent to lkml separately before, but I'm including it again, for a very simple reason: Try to test the paravirt patches without it, and you'll fail miserably ;-) (and it was not yet
2011 Jul 21
51
Linux Stubdom Problem
2011/7/19 Stefano Stabellini <stefano.stabellini@eu.citrix.com>: > CC'ing Tim and xen-devel > > On Mon, 18 Jul 2011, Jiageng Yu wrote: >> 2011/7/16 Stefano Stabellini <stefano.stabellini@eu.citrix.com>: >> > On Fri, 15 Jul 2011, Jiageng Yu wrote: >> >> 2011/7/15 Jiageng Yu <yujiageng734@gmail.com>: >> >> > 2011/7/15
2020 Nov 03
0
[patch V3 13/37] mips/mm/highmem: Switch to generic kmap atomic
...ed with it. - */ -void *kmap_atomic_pfn(unsigned long pfn) -{ - unsigned long vaddr; - int idx, type; - - preempt_disable(); - pagefault_disable(); - - type = kmap_atomic_idx_push(); - idx = type + KM_TYPE_NR*smp_processor_id(); - vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx); - set_pte(kmap_pte-idx, pfn_pte(pfn, PAGE_KERNEL)); - flush_tlb_one(vaddr); - - return (void*) vaddr; -} - -void __init kmap_init(void) -{ - unsigned long kmap_vstart; - - /* cache the first kmap pte */ - kmap_vstart = __fix_to_virt(FIX_KMAP_BEGIN); - kmap_pte = virt_to_kpte(kmap_vstart); -} --- a/arch/mips/mm/init.c +++ b/arch/m...
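
Callers are unaffected by removing the MIPS-private implementation; they keep using the generic entry points. A minimal usage sketch, assuming a valid highmem pfn and a caller-provided destination buffer:

    #include <linux/highmem.h>
    #include <linux/string.h>

    /* Copy one page, identified by pfn, into a kernel buffer using the
     * generic atomic kmap; with this series the behaviour on MIPS matches
     * every other architecture. */
    static void copy_page_from_pfn(void *dst, unsigned long pfn)
    {
            void *vaddr = kmap_atomic_pfn(pfn);

            memcpy(dst, vaddr, PAGE_SIZE);
            kunmap_atomic(vaddr);
    }
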