search for: pmd_pfn

Displaying 20 results from an estimated 36 matches for "pmd_pfn".

2019 Aug 06
0
[PATCH 09/15] mm: don't abuse pte_index() in hmm_vma_handle_pmd
...iff --git a/mm/hmm.c b/mm/hmm.c
index 03d37e102e3b..2083e4db46f5 100644
--- a/mm/hmm.c
+++ b/mm/hmm.c
@@ -486,7 +486,7 @@ static int hmm_vma_handle_pmd(struct mm_walk *walk,
 	if (pmd_protnone(pmd) || fault || write_fault)
 		return hmm_vma_walk_hole_(addr, end, fault, write_fault, walk);
-	pfn = pmd_pfn(pmd) + pte_index(addr);
+	pfn = pmd_pfn(pmd) + ((addr & ~PMD_MASK) >> PAGE_SHIFT);
 	for (i = 0; addr < end; addr += PAGE_SIZE, i++, pfn++) {
 		if (pmd_devmap(pmd)) {
 			pgmap = get_dev_pagemap(pfn, pgmap);
--
2.20.1
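
For readers skimming the diff: the open-coded expression is the page index of addr within its PMD-sized region. On configurations where one PMD maps PTRS_PER_PTE pages, pte_index() happened to compute the same value, which is why the old code worked, but only by accident. A minimal user-space sketch of the computation (constants assume x86-64 with 4 KiB pages and 2 MiB PMDs; the harness is illustrative, not kernel code):

    /* Sketch only: page index of `addr` inside its 2 MiB PMD region. */
    #include <stdio.h>
    #include <stdint.h>

    #define PAGE_SHIFT 12
    #define PMD_SHIFT  21
    #define PMD_MASK   (~((1UL << PMD_SHIFT) - 1))

    int main(void)
    {
        uint64_t addr = 0x7f3a00205000UL;   /* arbitrary example address */

        /* Byte offset within the 2 MiB region, then in pages (0..511). */
        uint64_t idx = (addr & ~PMD_MASK) >> PAGE_SHIFT;

        printf("page index within PMD: %llu\n", (unsigned long long)idx);
        return 0;
    }

For the example address this prints 5: the sixth 4 KiB page of the huge page.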
2020 Sep 02
1
[PATCH v2 1/7] mm/thp: fix __split_huge_pmd_locked() for migration PMD
...le splitting a migrating PMD.
> However, the code always increments the page->_mapcount and adjusts the
> memory control group accounting assuming the page is mapped.
> Also, if the PMD entry is a migration PMD entry, the call to
> is_huge_zero_pmd(*pmd) is incorrect because it calls pmd_pfn(pmd) instead
> of migration_entry_to_pfn(pmd_to_swp_entry(pmd)).
> Fix these problems by checking for a PMD migration entry.
>
> Signed-off-by: Ralph Campbell <rcampbell at nvidia.com>

Thanks for the fix. You can add

Reviewed-by: Zi Yan <ziy at nvidia.com>

I think you also...
2009 Jun 05
1
[PATCH] lguest: PAE support
...+++ b/drivers/lguest/lg.h
@@ -137,6 +137,8 @@ int run_guest(struct lg_cpu *cpu, unsigned long __user *user);
  * in the kernel. */
 #define pgd_flags(x)	(pgd_val(x) & ~PAGE_MASK)
 #define pgd_pfn(x)	(pgd_val(x) >> PAGE_SHIFT)
+#define pmd_flags(x)	(pmd_val(x) & ~PAGE_MASK)
+#define pmd_pfn(x)	(pmd_val(x) >> PAGE_SHIFT)

 /* interrupts_and_traps.c: */
 void maybe_do_interrupt(struct lg_cpu *cpu);
@@ -169,6 +171,9 @@ int init_guest_pagetable(struct lguest *lg);
 void free_guest_pagetable(struct lguest *lg);
 void guest_new_pagetable(struct lg_cpu *cpu, unsigned long pgtable);
v...
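
The two new accessors mirror the pgd ones directly above them: the pfn lives in the bits above PAGE_SHIFT and the flags in the low bits under PAGE_MASK. A small user-space sketch of that split (4 KiB pages assumed; the test harness is mine, not lguest code):

    /* Sketch of the lguest-style pfn/flags split; harness is illustrative. */
    #include <stdio.h>
    #include <stdint.h>

    #define PAGE_SHIFT 12
    #define PAGE_MASK  (~((1UL << PAGE_SHIFT) - 1))

    #define pmd_val(x)   (x)
    #define pmd_flags(x) (pmd_val(x) & ~PAGE_MASK)
    #define pmd_pfn(x)   (pmd_val(x) >> PAGE_SHIFT)

    int main(void)
    {
        uint64_t pmd = (0x1234UL << PAGE_SHIFT) | 0x67;  /* pfn 0x1234, flags 0x67 */

        printf("pfn=0x%llx flags=0x%llx\n",
               (unsigned long long)pmd_pfn(pmd),
               (unsigned long long)pmd_flags(pmd));
        return 0;
    }

The bare shift is only safe because these entries keep no flag bits above the pfn; compare the x86-64 paravirt definition later in these results, which masks with __PHYSICAL_MASK first.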
2009 Apr 16
1
NULL pointer dereference at __switch_to() ( __unlazy_fpu ) with lguest PAE patch
...ir;
 	pgd_t *pgdir;
 };

@@ -137,6 +137,8 @@ int run_guest(struct lg_cpu *cpu, unsigned long __user *user);
  * in the kernel. */
 #define pgd_flags(x)	(pgd_val(x) & ~PAGE_MASK)
 #define pgd_pfn(x)	(pgd_val(x) >> PAGE_SHIFT)
+#define pmd_flags(x)	(pmd_val(x) & ~PAGE_MASK)
+#define pmd_pfn(x)	(pmd_val(x) >> PAGE_SHIFT)

 /* interrupts_and_traps.c: */
 void maybe_do_interrupt(struct lg_cpu *cpu);
@@ -168,6 +170,9 @@ int init_guest_pagetable(struct lguest *lg);
 void free_guest_pagetable(struct lguest *lg);
 void guest_new_pagetable(struct lg_cpu *cpu, unsigned long pgtable);
v...
2019 Aug 07
2
[PATCH 04/15] mm: remove the pgmap field from struct hmm_vma_walk
...->private;
>  	struct hmm_range *range = hmm_vma_walk->range;
> +	struct dev_pagemap *pgmap = NULL;
>  	unsigned long pfn, npages, i;
>  	bool fault, write_fault;
>  	uint64_t cpu_flags;
> @@ -490,17 +490,14 @@ static int hmm_vma_handle_pmd(struct mm_walk *walk,
>  	pfn = pmd_pfn(pmd) + pte_index(addr);
>  	for (i = 0; addr < end; addr += PAGE_SIZE, i++, pfn++) {
>  		if (pmd_devmap(pmd)) {
> -			hmm_vma_walk->pgmap = get_dev_pagemap(pfn,
> -					      hmm_vma_walk->pgmap);
> -			if (unlikely(!hmm_vma_walk->pgmap))
> +			pgmap = get_dev_pagemap...
2020 Sep 02
0
[PATCH v2 1/7] mm/thp: fix __split_huge_pmd_locked() for migration PMD
...md_locked() can handle splitting a migrating PMD. However, the code
always increments the page->_mapcount and adjusts the memory control group
accounting assuming the page is mapped. Also, if the PMD entry is a
migration PMD entry, the call to is_huge_zero_pmd(*pmd) is incorrect
because it calls pmd_pfn(pmd) instead of
migration_entry_to_pfn(pmd_to_swp_entry(pmd)). Fix these problems by
checking for a PMD migration entry.

Signed-off-by: Ralph Campbell <rcampbell at nvidia.com>
---
 mm/huge_memory.c | 42 +++++++++++++++++++++++-------------------
 1 file changed, 23 insertions(+), 19 deletio...
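
The bug's shape is easiest to see with the two entry layouts side by side. Below is a reduced user-space model; the PMD_MIGRATION bit and both encodings are hypothetical stand-ins (the kernel's swap-entry format differs), chosen only to show why pmd_pfn() reads the wrong bits from a migration entry and why the fix is to test for the migration case first:

    /* Model only: a present PMD stores pfn << 12; a migration PMD stores
     * a differently shifted pfn plus a tag bit, so pmd_pfn() must not be
     * used on it. */
    #include <stdbool.h>
    #include <stdio.h>
    #include <stdint.h>

    #define PAGE_SHIFT    12
    #define PMD_MIGRATION (1UL << 1)   /* hypothetical software tag bit */

    static bool is_pmd_migration_entry(uint64_t pmd) { return pmd & PMD_MIGRATION; }
    static uint64_t pmd_pfn(uint64_t pmd)            { return pmd >> PAGE_SHIFT; }
    /* Swap-style decoding: the pfn sits above the tag bits. */
    static uint64_t migration_entry_to_pfn(uint64_t pmd) { return pmd >> (PAGE_SHIFT + 2); }

    static uint64_t pfn_of(uint64_t pmd)
    {
        if (is_pmd_migration_entry(pmd))
            return migration_entry_to_pfn(pmd); /* the fix: check first */
        return pmd_pfn(pmd);                    /* present entries only */
    }

    int main(void)
    {
        uint64_t present  = 0x1234UL << PAGE_SHIFT;
        uint64_t migrated = (0x1234UL << (PAGE_SHIFT + 2)) | PMD_MIGRATION;

        /* Both print 0x1234; pmd_pfn(migrated) would wrongly give 0x48d0. */
        printf("present pfn:  0x%llx\n", (unsigned long long)pfn_of(present));
        printf("migrated pfn: 0x%llx\n", (unsigned long long)pfn_of(migrated));
        return 0;
    }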
2019 Aug 06
0
[PATCH 04/15] mm: remove the pgmap field from struct hmm_vma_walk
...t hmm_vma_walk *hmm_vma_walk = walk->private;
 	struct hmm_range *range = hmm_vma_walk->range;
+	struct dev_pagemap *pgmap = NULL;
 	unsigned long pfn, npages, i;
 	bool fault, write_fault;
 	uint64_t cpu_flags;
@@ -490,17 +490,14 @@ static int hmm_vma_handle_pmd(struct mm_walk *walk,
 	pfn = pmd_pfn(pmd) + pte_index(addr);
 	for (i = 0; addr < end; addr += PAGE_SIZE, i++, pfn++) {
 		if (pmd_devmap(pmd)) {
-			hmm_vma_walk->pgmap = get_dev_pagemap(pfn,
-					      hmm_vma_walk->pgmap);
-			if (unlikely(!hmm_vma_walk->pgmap))
+			pgmap = get_dev_pagemap(pfn, pgmap);
+			if (unlikely...
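
The pattern the patch moves to is a plain function-local cache: the last successful lookup is reused across loop iterations and the reference is dropped before the function returns, so nothing dangles in long-lived walker state. A stand-in sketch (struct layout and helper names are illustrative, not the kernel's get_dev_pagemap()/put_dev_pagemap()):

    /* Sketch of the function-local pagemap cache; all names illustrative. */
    #include <stdio.h>

    struct dev_pagemap { unsigned long first_pfn, last_pfn; };

    static struct dev_pagemap fake_map = { .first_pfn = 0, .last_pfn = ~0UL };

    /* Hypothetical lookup: reuses `cached` while it still covers pfn. */
    static struct dev_pagemap *lookup_pagemap(unsigned long pfn,
                                              struct dev_pagemap *cached)
    {
        if (cached && pfn >= cached->first_pfn && pfn <= cached->last_pfn)
            return cached;
        return &fake_map;       /* stub: real code takes a reference here */
    }

    static void put_pagemap(struct dev_pagemap *pgmap)
    {
        (void)pgmap;            /* stub: real code drops the reference */
    }

    static int walk_pmd(unsigned long pfn, unsigned long npages)
    {
        struct dev_pagemap *pgmap = NULL;   /* local, not walker state */
        unsigned long i;

        for (i = 0; i < npages; i++, pfn++) {
            pgmap = lookup_pagemap(pfn, pgmap);
            if (!pgmap)
                return -1;
            /* ... record pfn ... */
        }
        if (pgmap)
            put_pagemap(pgmap); /* reference cannot outlive the walk */
        return 0;
    }

    int main(void)
    {
        printf("walk: %d\n", walk_pmd(0x1000, 512));
        return 0;
    }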
2019 Aug 07
0
[PATCH 04/15] mm: remove the pgmap field from struct hmm_vma_walk
..._vma_walk->range;
> > +	struct dev_pagemap *pgmap = NULL;
> >  	unsigned long pfn, npages, i;
> >  	bool fault, write_fault;
> >  	uint64_t cpu_flags;
> > @@ -490,17 +490,14 @@ static int hmm_vma_handle_pmd(struct mm_walk *walk,
> >  	pfn = pmd_pfn(pmd) + pte_index(addr);
> >  	for (i = 0; addr < end; addr += PAGE_SIZE, i++, pfn++) {
> >  		if (pmd_devmap(pmd)) {
> > -			hmm_vma_walk->pgmap = get_dev_pagemap(pfn,
> > -					      hmm_vma_walk->pgm...
2020 Mar 20
0
[PATCH 2/2] mm/thp: Rename pmd_mknotpresent() as pmd_mknotvalid()
...pgtable.h
+++ b/arch/x86/include/asm/pgtable.h
@@ -589,7 +589,7 @@ static inline pud_t pfn_pud(unsigned long page_nr, pgprot_t pgprot)
 	return __pud(pfn | check_pgprot(pgprot));
 }

-static inline pmd_t pmd_mknotpresent(pmd_t pmd)
+static inline pmd_t pmd_mknotvalid(pmd_t pmd)
 {
 	return pfn_pmd(pmd_pfn(pmd),
		       __pgprot(pmd_flags(pmd) & ~(_PAGE_PRESENT|_PAGE_PROTNONE)));
diff --git a/arch/x86/mm/kmmio.c b/arch/x86/mm/kmmio.c
index 49d7814b59a9..f9f61b934475 100644
--- a/arch/x86/mm/kmmio.c
+++ b/arch/x86/mm/kmmio.c
@@ -130,7 +130,7 @@ static void clear_pmd_presence(pmd_t *pmd, bool clea...
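
Mechanically, the helper rebuilds the entry from its own pfn and flags, minus the two bits that make it look present (or prot-none) to a walker. A user-space sketch; the bit positions follow x86 but treat them as illustrative:

    /* Sketch of the invalidation step; constants and harness illustrative. */
    #include <stdio.h>
    #include <stdint.h>

    #define PAGE_SHIFT     12
    #define _PAGE_PRESENT  (1UL << 0)
    #define _PAGE_RW       (1UL << 1)
    #define _PAGE_PROTNONE (1UL << 8)

    static uint64_t pmd_mknotvalid(uint64_t pmd)
    {
        /* Keep pfn and remaining flags; drop only the "looks present" bits. */
        return pmd & ~(_PAGE_PRESENT | _PAGE_PROTNONE);
    }

    int main(void)
    {
        uint64_t pmd = (0x1234UL << PAGE_SHIFT) | _PAGE_PRESENT | _PAGE_RW;
        uint64_t inv = pmd_mknotvalid(pmd);

        printf("before: 0x%llx  after: 0x%llx (pfn and RW kept)\n",
               (unsigned long long)pmd, (unsigned long long)inv);
        return 0;
    }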
2020 Apr 22
0
[PATCH hmm 2/5] mm/hmm: make hmm_range_fault return 0 or -1
...dr, unsigned long end,
 	}
 	if (required_fault)
 		return hmm_vma_fault(addr, end, required_fault, walk);
-	hmm_vma_walk->last = addr;
 	return hmm_pfns_fill(addr, end, range, HMM_PFN_NONE);
 }

@@ -207,7 +206,6 @@ static int hmm_vma_handle_pmd(struct mm_walk *walk, unsigned long addr,
 	pfn = pmd_pfn(pmd) + ((addr & ~PMD_MASK) >> PAGE_SHIFT);
 	for (i = 0; addr < end; addr += PAGE_SIZE, i++, pfn++)
 		pfns[i] = hmm_device_entry_from_pfn(range, pfn) | cpu_flags;
-	hmm_vma_walk->last = end;
 	return 0;
 }
 #else /* CONFIG_TRANSPARENT_HUGEPAGE */
@@ -386,13 +384,10 @@ static int hm...
2020 May 01
0
[PATCH hmm v2 2/5] mm/hmm: make hmm_range_fault return 0 or -1
...dr, unsigned long end,
 	}
 	if (required_fault)
 		return hmm_vma_fault(addr, end, required_fault, walk);
-	hmm_vma_walk->last = addr;
 	return hmm_pfns_fill(addr, end, range, HMM_PFN_NONE);
 }

@@ -207,7 +206,6 @@ static int hmm_vma_handle_pmd(struct mm_walk *walk, unsigned long addr,
 	pfn = pmd_pfn(pmd) + ((addr & ~PMD_MASK) >> PAGE_SHIFT);
 	for (i = 0; addr < end; addr += PAGE_SIZE, i++, pfn++)
 		pfns[i] = hmm_device_entry_from_pfn(range, pfn) | cpu_flags;
-	hmm_vma_walk->last = end;
 	return 0;
 }
 #else /* CONFIG_TRANSPARENT_HUGEPAGE */
@@ -386,13 +384,10 @@ static int hm...
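
The caller-side consequence of the 0-or-minus-one contract, sketched with a hypothetical walker (this is not hmm's actual API): with no resumable `last` cookie, a failed call is simply retried over the whole range:

    /* Illustrative caller; fault_range() is a stub, not hmm_range_fault(). */
    #include <stdio.h>

    /* Stub walker: 0 = range fully populated, -1 = caller must retry. */
    static int fault_range(unsigned long start, unsigned long end)
    {
        (void)start;
        (void)end;
        return 0;
    }

    int main(void)
    {
        unsigned long start = 0x100000UL, end = 0x200000UL;
        int ret = -1;

        /* No partial-progress cookie to resume from: on failure the
         * whole range is faulted again from the top. */
        while (ret != 0)
            ret = fault_range(start, end);

        printf("range [%#lx, %#lx) populated\n", start, end);
        return 0;
    }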
2020 Sep 03
1
[PATCH v3] mm/thp: fix __split_huge_pmd_locked() for migration PMD
...md_locked() can handle splitting a migrating PMD. However, the code
always increments the page->_mapcount and adjusts the memory control group
accounting assuming the page is mapped. Also, if the PMD entry is a
migration PMD entry, the call to is_huge_zero_pmd(*pmd) is incorrect
because it calls pmd_pfn(pmd) instead of
migration_entry_to_pfn(pmd_to_swp_entry(pmd)). Fix these problems by
checking for a PMD migration entry.

Fixes: 84c3fc4e9c56 ("mm: thp: check pmd migration entry in common path")
cc: stable at vger.kernel.org # 4.14+
Signed-off-by: Ralph Campbell <rcampbell at nvidia.c...
2019 Aug 06
24
hmm cleanups, v2
Hi Jérôme, Ben, Felix and Jason,

below is a series against the hmm tree which cleans up various minor bits
and allows HMM_MIRROR to be built on all architectures.

Diffstat:
 11 files changed, 94 insertions(+), 210 deletions(-)

A git tree is also available at:

    git://git.infradead.org/users/hch/misc.git hmm-cleanups.2

Gitweb:
2007 Apr 18
0
[RFC/PATCH PV_OPS X86_64 08/17] paravirt_ops - memory management
...ine int pmd_large(pmd_t pte) {
 		pmd_index(address))
 #define pmd_none(x)	(!pmd_val(x))
 #define pmd_present(x)	(pmd_val(x) & _PAGE_PRESENT)
-#define pmd_clear(xp)	do { set_pmd(xp, __pmd(0)); } while (0)
 #define pfn_pmd(nr,prot)	(__pmd(((nr) << PAGE_SHIFT) | pgprot_val(prot)))
 #define pmd_pfn(x)	((pmd_val(x) & __PHYSICAL_MASK) >> PAGE_SHIFT)

@@ -362,19 +372,20 @@ static inline int pmd_large(pmd_t pte) {
 /* physical address -> PTE */
 static inline pte_t mk_pte_phys(unsigned long physpage, pgprot_t pgprot)
 {
-	pte_t pte;
-	pte_val(pte) = physpage | pgprot_val(pgprot);...
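
Unlike the simple lguest definition earlier in these results, this x86-64 version masks with __PHYSICAL_MASK before shifting, because flag bits such as NX at bit 63 sit above the physical address and a bare shift would fold them into the pfn. A user-space sketch (46 physical-address bits assumed purely for illustration):

    /* Sketch of why the mask matters; width and harness illustrative. */
    #include <stdio.h>
    #include <stdint.h>

    #define PAGE_SHIFT      12
    #define __PHYSICAL_MASK ((1ULL << 46) - 1)
    #define _PAGE_NX        (1ULL << 63)

    static uint64_t pmd_pfn(uint64_t pmd)
    {
        return (pmd & __PHYSICAL_MASK) >> PAGE_SHIFT;
    }

    int main(void)
    {
        uint64_t pmd = (0x1234ULL << PAGE_SHIFT) | _PAGE_NX | 1 /* present */;

        /* Masked: 0x1234. Bare shift: 0x8000000001234 -- NX leaks in. */
        printf("masked pfn: 0x%llx\n", (unsigned long long)pmd_pfn(pmd));
        printf("bare shift: 0x%llx\n", (unsigned long long)(pmd >> PAGE_SHIFT));
        return 0;
    }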
2020 Apr 22
1
[PATCH V2 0/2] mm/thp: Rename pmd_mknotpresent() as pmd_mkinvalid()
This series renames pmd_mknotpresent() as pmd_mkinvalid(). Before that it drops an existing pmd_mknotpresent() definition from the powerpc platform, which was never required because powerpc defines its pmdp_invalidate() through subscribing __HAVE_ARCH_PMDP_INVALIDATE. This does not create any functional change. This rename was suggested by Catalin during a previous discussion while we were trying to change
2020 Mar 20
4
[PATCH 0/2] mm/thp: Rename pmd_mknotpresent() as pmd_mknotvalid()
This series renames pmd_mknotpresent() as pmd_mknotvalid(). Before that it drops an existing pmd_mknotpresent() definition from the powerpc platform, which was never required because powerpc defines its pmdp_invalidate() through subscribing __HAVE_ARCH_PMDP_INVALIDATE. This does not create any functional change. This rename was suggested by Catalin during a previous discussion while we were trying to
2007 Oct 31
5
[PATCH 0/7] (Re-)introducing pvops for x86_64 - Real pvops work part
Hey folks,

This is the part-of-pvops-implementation-that-is-not-exactly-a-merge. Neat,
huh? This is the majority of the work. The first patch in the series does
not really belong here. It was already sent to lkml separately before, but
I'm including it again, for a very simple reason: try to test the paravirt
patches without it, and you'll fail miserably ;-) (and it was not yet