search for: pte_pfn

Displaying 20 results from an estimated 141 matches for "pte_pfn".

2019 Sep 12
1
[PATCH 2/4] mm/hmm: allow snapshot of the special zero page
...2,7 +532,7 @@ static int hmm_vma_handle_pte(struct mm_walk *walk, unsigned long addr,
>  		return -EBUSY;
>  	} else if (IS_ENABLED(CONFIG_ARCH_HAS_PTE_SPECIAL) && pte_special(pte)) {
>  		*pfn = range->values[HMM_PFN_SPECIAL];
> -		return -EFAULT;
> +		return is_zero_pfn(pte_pfn(pte)) ? 0 : -EFAULT;

Any chance to just use a normal if here:

	if (!is_zero_pfn(pte_pfn(pte)))
		return -EFAULT;
	return 0;
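The change only lets the shared zero page through the special-PTE branch. With the reviewer's suggestion folded in, the branch would read roughly as follows (a sketch, not the committed code):

	} else if (IS_ENABLED(CONFIG_ARCH_HAS_PTE_SPECIAL) && pte_special(pte)) {
		*pfn = range->values[HMM_PFN_SPECIAL];
		/* Only the shared zero page may be snapshotted; any other
		 * special PTE still fails the walk. */
		if (!is_zero_pfn(pte_pfn(pte)))
			return -EFAULT;
		return 0;
	}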
2019 Jul 30
0
[PATCH 08/13] mm: remove the mask variable in hmm_vma_walk_hugetlb_entry
...
 	spinlock_t *ptl;
 	pte_t entry;
 	int ret = 0;

-	mask = huge_page_size(h) - 1;
-
 	ptl = huge_pte_lock(hstate_vma(vma), walk->mm, pte);
 	entry = huge_ptep_get(pte);
@@ -799,7 +796,7 @@ static int hmm_vma_walk_hugetlb_entry(pte_t *pte, unsigned long hmask,
 		goto unlock;
 	}

-	pfn = pte_pfn(entry) + ((start & mask) >> PAGE_SHIFT);
+	pfn = pte_pfn(entry) + ((start & hmask) >> PAGE_SHIFT);
 	for (; addr < end; addr += PAGE_SIZE, i++, pfn++)
 		range->pfns[i] = hmm_device_entry_from_pfn(range, pfn) | cpu_flags;
--
2.20.1
2019 Aug 06
0
[PATCH 08/15] mm: remove the mask variable in hmm_vma_walk_hugetlb_entry
...
 	spinlock_t *ptl;
 	pte_t entry;
 	int ret = 0;

-	mask = huge_page_size(h) - 1;
-
 	ptl = huge_pte_lock(hstate_vma(vma), walk->mm, pte);
 	entry = huge_ptep_get(pte);
@@ -799,7 +796,7 @@ static int hmm_vma_walk_hugetlb_entry(pte_t *pte, unsigned long hmask,
 		goto unlock;
 	}

-	pfn = pte_pfn(entry) + ((start & mask) >> PAGE_SHIFT);
+	pfn = pte_pfn(entry) + ((start & ~hmask) >> PAGE_SHIFT);
 	for (; addr < end; addr += PAGE_SIZE, i++, pfn++)
 		range->pfns[i] = hmm_device_entry_from_pfn(range, pfn) | cpu_flags;
--
2.20.1
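This repost masks with ~hmask because the hugetlb walker hands in hmask as the huge page mask, i.e. ~(huge_page_size - 1), so ~hmask keeps exactly the offset bits inside the huge page. A standalone arithmetic check of the two variants (userspace sketch; 2 MiB huge pages and 4 KiB base pages assumed):

	#include <stdio.h>

	int main(void)
	{
		unsigned long hmask = ~(0x200000UL - 1);   /* huge page mask */
		unsigned long start = 0x200000UL + 0x5000; /* 5 base pages in */

		/* ~hmask keeps the in-page offset: 0x5000 >> 12 == 5 */
		printf("~hmask: +%lu pages\n", (start & ~hmask) >> 12);
		/* hmask keeps the base address instead: 0x200000 >> 12 == 512 */
		printf(" hmask: +%lu pages\n", (start & hmask) >> 12);
		return 0;
	}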
2019 Sep 11
0
[PATCH 2/4] mm/hmm: allow snapshot of the special zero page
...m.c
+++ b/mm/hmm.c
@@ -532,7 +532,7 @@ static int hmm_vma_handle_pte(struct mm_walk *walk, unsigned long addr,
 		return -EBUSY;
 	} else if (IS_ENABLED(CONFIG_ARCH_HAS_PTE_SPECIAL) && pte_special(pte)) {
 		*pfn = range->values[HMM_PFN_SPECIAL];
-		return -EFAULT;
+		return is_zero_pfn(pte_pfn(pte)) ? 0 : -EFAULT;
 	}

 	*pfn = hmm_device_entry_from_pfn(range, pte_pfn(pte)) | cpu_flags;
--
2.20.1
2020 May 20
1
[PATCH v3 51/75] x86/sev-es: Handle MMIO events
...pgd, va, &level);
> +	if (!pte)
> +		return 0;

'0' is a valid physical address. It happens to be reserved in the kernel thanks to L1TF, but using '0' as an error code is ugly. Not to mention none of the callers actually check the result.

> +
> +	pa = (phys_addr_t)pte_pfn(*pte) << PAGE_SHIFT;
> +	pa |= va & ~page_level_mask(level);
> +
> +	return pa;
> +}
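The quoted hunk rebuilds a physical address from two parts: the page frame number in the PTE gives the aligned physical base, and the low bits of the virtual address give the offset within the mapping at the given page-table level. A standalone model of the same arithmetic (hypothetical helper; level_mask stands in for page_level_mask(level)):

	#include <stdint.h>

	/* E.g. for a 4 KiB mapping level_mask == ~0xfffULL; for a 2 MiB
	 * mapping level_mask == ~0x1fffffULL. */
	static uint64_t pa_from_pte(uint64_t pfn, uint64_t va, uint64_t level_mask)
	{
		uint64_t pa = pfn << 12;	/* PAGE_SHIFT on x86 */

		pa |= va & ~level_mask;		/* offset within the mapping */
		return pa;
	}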
2019 Jul 30
29
hmm_range_fault related fixes and legacy API removal v3
Hi Jérôme, Ben, Felix and Jason, below is a series against the hmm tree which cleans up various minor bits and allows HMM_MIRROR to be built on all architectures. Diffstat: 7 files changed, 81 insertions(+), 171 deletions(-) A git tree is also available at: git://git.infradead.org/users/hch/misc.git hmm-cleanups Gitweb:
2019 Aug 06
0
[PATCH 04/15] mm: remove the pgmap field from struct hmm_vma_walk
...p **pgmap)
 {
 	struct hmm_vma_walk *hmm_vma_walk = walk->private;
 	struct hmm_range *range = hmm_vma_walk->range;
@@ -591,9 +588,8 @@ static int hmm_vma_handle_pte(struct mm_walk *walk, unsigned long addr,
 		goto fault;

 	if (pte_devmap(pte)) {
-		hmm_vma_walk->pgmap = get_dev_pagemap(pte_pfn(pte),
-						      hmm_vma_walk->pgmap);
-		if (unlikely(!hmm_vma_walk->pgmap))
+		*pgmap = get_dev_pagemap(pte_pfn(pte), *pgmap);
+		if (unlikely(!*pgmap))
 			return -EBUSY;
 	} else if (IS_ENABLED(CONFIG_ARCH_HAS_PTE_SPECIAL) && pte_special(pte)) {
 		*pfn = range->values[HMM_PFN...
2019 Jul 30
0
[PATCH 07/13] mm: remove the page_shift member from struct hmm_range
...rt - range->start) >> PAGE_SHIFT;
 	orig_pfn = range->pfns[i];
 	range->pfns[i] = range->values[HMM_PFN_NONE];
 	cpu_flags = pte_to_hmm_pfn_flags(range, entry);
@@ -812,8 +799,8 @@ static int hmm_vma_walk_hugetlb_entry(pte_t *pte, unsigned long hmask,
 		goto unlock;
 	}

-	pfn = pte_pfn(entry) + ((start & mask) >> range->page_shift);
-	for (; addr < end; addr += size, i++, pfn += pfn_inc)
+	pfn = pte_pfn(entry) + ((start & mask) >> PAGE_SHIFT);
+	for (; addr < end; addr += PAGE_SIZE, i++, pfn++)
 		range->pfns[i] = hmm_device_entry_from_pfn(range, p...
2019 Aug 06
0
[PATCH 07/15] mm: remove the page_shift member from struct hmm_range
...rt - range->start) >> PAGE_SHIFT;
 	orig_pfn = range->pfns[i];
 	range->pfns[i] = range->values[HMM_PFN_NONE];
 	cpu_flags = pte_to_hmm_pfn_flags(range, entry);
@@ -812,8 +799,8 @@ static int hmm_vma_walk_hugetlb_entry(pte_t *pte, unsigned long hmask,
 		goto unlock;
 	}

-	pfn = pte_pfn(entry) + ((start & mask) >> range->page_shift);
-	for (; addr < end; addr += size, i++, pfn += pfn_inc)
+	pfn = pte_pfn(entry) + ((start & mask) >> PAGE_SHIFT);
+	for (; addr < end; addr += PAGE_SIZE, i++, pfn++)
 		range->pfns[i] = hmm_device_entry_from_pfn(range, p...
2020 Nov 03
0
[patch V3 24/37] sched: highmem: Store local kmaps in task struct
...NSA to calculate the
+	 * coloured PTE index. Uses the PFN encoded into the pteval
+	 * and the map index calculation because the actual mapped
+	 * virtual address is not stored in task::kmap_ctrl.
+	 * For any sane architecture this is optimized out.
+	 */
+	idx = arch_kmap_local_map_idx(i, pte_pfn(pteval));
+
+	addr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
+	arch_kmap_local_pre_unmap(addr);
+	pte_clear(&init_mm, addr, kmap_pte - idx);
+	arch_kmap_local_post_unmap(addr);
+	}
+}
+
+void __kmap_local_sched_in(void)
+{
+	struct task_struct *tsk = current;
+	pte_t *kmap_pte = kmap_get_pte()...
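A toy model of the colouring the comment describes (hypothetical; the kernel's real hook is arch_kmap_local_map_idx() and the address comes from the fixmap): on architectures with aliasing caches the slot index is chosen so the kernel alias shares the page's cache colour, which can be derived from the PFN alone:

	/* Hypothetical colouring: colour = low bits of the PFN. */
	static unsigned int map_idx(unsigned int depth, unsigned long pfn)
	{
		const unsigned int ncolours = 4;	/* assumed cache colours */

		return depth * ncolours + (pfn % ncolours);
	}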
2019 Aug 06
24
hmm cleanups, v2
Hi Jérôme, Ben, Felix and Jason, below is a series against the hmm tree which cleans up various minor bits and allows HMM_MIRROR to be built on all architectures. Diffstat: 11 files changed, 94 insertions(+), 210 deletions(-) A git tree is also available at: git://git.infradead.org/users/hch/misc.git hmm-cleanups.2 Gitweb:
2007 Apr 18
0
[patch 6/9] Guest page hinting: writable page table entries.
...e_val);
diff -urpN linux-2.6/mm/memory.c linux-2.6-patched/mm/memory.c
--- linux-2.6/mm/memory.c	2006-09-01 12:50:24.000000000 +0200
+++ linux-2.6-patched/mm/memory.c	2006-09-01 12:50:24.000000000 +0200
@@ -1558,6 +1558,7 @@ static int do_wp_page(struct mm_struct *
 		flush_cache_page(vma, address, pte_pfn(orig_pte));
 		entry = pte_mkyoung(orig_pte);
 		entry = maybe_mkwrite(pte_mkdirty(entry), vma);
+		page_check_writable(old_page, entry);
 		ptep_set_access_flags(vma, address, page_table, entry, 1);
 		update_mmu_cache(vma, address, entry);
 		lazy_mmu_prot_update(entry);
@@ -1607,6 +1608,7 @@ got...
2020 Apr 22
0
[PATCH hmm 5/5] mm/hmm: remove the customizable pfn format from hmm_range_fault
...+		hmm_pte_need_fault(hmm_vma_walk, pfn_req_flags, cpu_flags);
 	if (required_fault)
 		goto fault;
@@ -297,15 +287,15 @@ static int hmm_vma_handle_pte(struct mm_walk *walk, unsigned long addr,
 	 * fall through and treat it like a normal page.
 	 */
 	if (pte_special(pte) && !is_zero_pfn(pte_pfn(pte))) {
-		if (hmm_pte_need_fault(hmm_vma_walk, orig_pfn, 0)) {
+		if (hmm_pte_need_fault(hmm_vma_walk, pfn_req_flags, 0)) {
 			pte_unmap(ptep);
 			return -EFAULT;
 		}
-		*pfn = range->values[HMM_PFN_ERROR];
+		*hmm_pfn = HMM_PFN_ERROR;
 		return 0;
 	}

-	*pfn = hmm_device_entry_from_pfn(r...
2020 May 01
0
[PATCH hmm v2 5/5] mm/hmm: remove the customizable pfn format from hmm_range_fault
...+		hmm_pte_need_fault(hmm_vma_walk, pfn_req_flags, cpu_flags);
 	if (required_fault)
 		goto fault;
@@ -297,15 +287,15 @@ static int hmm_vma_handle_pte(struct mm_walk *walk, unsigned long addr,
 	 * fall through and treat it like a normal page.
 	 */
 	if (pte_special(pte) && !is_zero_pfn(pte_pfn(pte))) {
-		if (hmm_pte_need_fault(hmm_vma_walk, orig_pfn, 0)) {
+		if (hmm_pte_need_fault(hmm_vma_walk, pfn_req_flags, 0)) {
 			pte_unmap(ptep);
 			return -EFAULT;
 		}
-		*pfn = range->values[HMM_PFN_ERROR];
+		*hmm_pfn = HMM_PFN_ERROR;
 		return 0;
 	}

-	*pfn = hmm_device_entry_from_pfn(r...
2020 May 08
0
[PATCH 4/6] mm/hmm: add output flag for compound page mapping
...d_write(pud) ?
+			(HMM_PFN_VALID | HMM_PFN_COMPOUND | HMM_PFN_WRITE) :
+			(HMM_PFN_VALID | HMM_PFN_COMPOUND);
 }

 static int hmm_vma_walk_pud(pud_t *pudp, unsigned long start, unsigned long end,
@@ -484,7 +488,7 @@ static int hmm_vma_walk_hugetlb_entry(pte_t *pte, unsigned long hmask,
 	pfn = pte_pfn(entry) + ((start & ~hmask) >> PAGE_SHIFT);
 	for (; addr < end; addr += PAGE_SIZE, i++, pfn++)
-		range->hmm_pfns[i] = pfn | cpu_flags;
+		range->hmm_pfns[i] = pfn | cpu_flags | HMM_PFN_COMPOUND;
 	spin_unlock(ptl);

 	return 0;
--
2.20.1
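On the consumer side, a driver walking range->hmm_pfns[] after hmm_range_fault() could test the proposed output flag roughly like this (a sketch against the then-proposed API; map_huge() is a hypothetical driver helper):

	unsigned long entry = range->hmm_pfns[i];

	if (entry & HMM_PFN_VALID) {
		struct page *page = hmm_pfn_to_page(entry);

		/* HMM_PFN_COMPOUND: the CPU maps this address with a huge
		 * page, so the device may use a larger mapping as well. */
		if (entry & HMM_PFN_COMPOUND)
			map_huge(page);		/* hypothetical */
	}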
2020 Jun 19
0
[PATCH 01/16] mm: fix migrate_vma_setup() src_owner and normal pages
...te.c
index f37729673558..24535281cea3 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -2295,8 +2295,6 @@ static int migrate_vma_collect_pmd(pmd_t *pmdp,
 			if (is_write_device_private_entry(entry))
 				mpfn |= MIGRATE_PFN_WRITE;
 		} else {
-			if (migrate->src_owner)
-				goto next;
 			pfn = pte_pfn(pte);
 			if (is_zero_pfn(pfn)) {
 				mpfn = MIGRATE_PFN_MIGRATE;
--
2.20.1
2020 Jun 19
0
[PATCH 09/16] mm/hmm: add output flag for compound page mapping
...d_write(pud) ?
+			(HMM_PFN_VALID | HMM_PFN_COMPOUND | HMM_PFN_WRITE) :
+			(HMM_PFN_VALID | HMM_PFN_COMPOUND);
 }

 static int hmm_vma_walk_pud(pud_t *pudp, unsigned long start, unsigned long end,
@@ -484,7 +488,7 @@ static int hmm_vma_walk_hugetlb_entry(pte_t *pte, unsigned long hmask,
 	pfn = pte_pfn(entry) + ((start & ~hmask) >> PAGE_SHIFT);
 	for (; addr < end; addr += PAGE_SIZE, i++, pfn++)
-		range->hmm_pfns[i] = pfn | cpu_flags;
+		range->hmm_pfns[i] = pfn | cpu_flags | HMM_PFN_COMPOUND;
 	spin_unlock(ptl);

 	return 0;
--
2.20.1
2007 Apr 18
0
[PATCH 3/5] Fix missing pte update.patch
...le (0)
 #define pmd_clear(xp)	do { set_pmd(xp, __pmd(0)); } while (0)
-#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
-#define ptep_get_and_clear(mm,addr,xp)	__pte(xchg(&(xp)->pte_low, 0))
+#define raw_ptep_get_and_clear(xp)	__pte(xchg(&(xp)->pte_low, 0))
 #define pte_page(x)		pfn_to_page(pte_pfn(x))
 #define pte_none(x)		(!(x).pte_low)
diff -r f1dd818c2f06 include/asm-i386/pgtable-3level.h
--- a/include/asm-i386/pgtable-3level.h	Thu Oct 19 03:03:09 2006 -0700
+++ b/include/asm-i386/pgtable-3level.h	Thu Oct 19 03:03:18 2006 -0700
@@ -119,8 +119,7 @@ static inline void pmd_clear(pmd_t *pmd)...