Displaying 20 results from an estimated 51 matches for "pte_index".
2019 Aug 06
0
[PATCH 09/15] mm: don't abuse pte_index() in hmm_vma_handle_pmd
pte_index is an internal arch helper in various architectures,
without consistent semantics. Open code the calculation of the PMD
index based on the virtual address instead.
Signed-off-by: Christoph Hellwig <hch at lst.de>
---
mm/hmm.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git...
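For reference, the open-coded replacement reads roughly like this (a sketch
based on the diff context; the exact line in hmm_vma_handle_pmd may differ):

/* Offset of addr within its PMD-sized region, in pages -
 * exactly what pte_index() computed on x86. */
pfn = pmd_pfn(pmd) + ((addr & ~PMD_MASK) >> PAGE_SHIFT);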
2007 Apr 18
1
[PATCH 4/9] Vmi fix highpte
...addr, ptep) do { } while (0)
+#define paravirt_map_pt_hook(slot, va, pfn) do { } while (0)
#endif
/*
@@ -469,10 +470,24 @@ extern pte_t *lookup_address(unsigned lo
#endif
#if defined(CONFIG_HIGHPTE)
-#define pte_offset_map(dir, address) \
- ((pte_t *)kmap_atomic(pmd_page(*(dir)),KM_PTE0) + pte_index(address))
-#define pte_offset_map_nested(dir, address) \
- ((pte_t *)kmap_atomic(pmd_page(*(dir)),KM_PTE1) + pte_index(address))
+#define pte_offset_map(dir, address) \
+({ \
+ pte_t *__ptep; \
+ unsigned pfn = pmd_val(*(dir)) >> PAGE_SHIFT; \
+ __ptep = (pte_t *)kmap_atomi...
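The snippet is cut off at kmap_atomic(), but the shape of the replacement
macro follows from the hunk above. A reconstructed sketch
(paravirt_map_pt_hook and the pfn computation come from the hunk; the
completed body is an assumption, not the verbatim patch):

/* Map the PTE page, then tell the hypervisor which pfn now backs
 * the KM_PTE0 kmap slot, so it can track the page-table mapping. */
#define pte_offset_map(dir, address)					\
({									\
	pte_t *__ptep;							\
	unsigned pfn = pmd_val(*(dir)) >> PAGE_SHIFT;			\
	__ptep = (pte_t *)kmap_atomic(pmd_page(*(dir)), KM_PTE0);	\
	paravirt_map_pt_hook(KM_PTE0, __ptep, pfn);			\
	__ptep + pte_index(address);					\
})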
2009 Sep 21
1
[PATCH 2/5] lguest: use set_pte/set_pmd uniformly for real page table entries
...179,10 +1176,8 @@ void map_switcher_in_guest(struct lg_cpu
* page is already mapped there, we don't have to copy them out
* again.
*/
- pfn = __pa(cpu->regs_page) >> PAGE_SHIFT;
- native_set_pte(&regs_pte, pfn_pte(pfn, PAGE_KERNEL));
- native_set_pte(&switcher_pte_page[pte_index((unsigned long)pages)],
- regs_pte);
+ regs_pte = pfn_pte(__pa(cpu->regs_page) >> PAGE_SHIFT, PAGE_KERNEL);
+ set_pte(&switcher_pte_page[pte_index((unsigned long)pages)], regs_pte);
}
/*:*/
@@ -1209,7 +1204,7 @@ static __init void populate_switcher_pte
/* The first entries ar...
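pte_index() here just selects the PTE slot within a single page-table
page. The classic i386 definition (quoted from memory, so treat as a
sketch):

#define pte_index(address) \
	(((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))

so switcher_pte_page[pte_index((unsigned long)pages)] is the entry that
maps the switcher pages within that last-level table.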
2019 Aug 07
2
[PATCH 04/15] mm: remove the pgmap field from struct hmm_vma_walk
...> struct hmm_range *range = hmm_vma_walk->range;
> + struct dev_pagemap *pgmap = NULL;
> unsigned long pfn, npages, i;
> bool fault, write_fault;
> uint64_t cpu_flags;
> @@ -490,17 +490,14 @@ static int hmm_vma_handle_pmd(struct mm_walk *walk,
> pfn = pmd_pfn(pmd) + pte_index(addr);
> for (i = 0; addr < end; addr += PAGE_SIZE, i++, pfn++) {
> if (pmd_devmap(pmd)) {
> - hmm_vma_walk->pgmap = get_dev_pagemap(pfn,
> - hmm_vma_walk->pgmap);
> - if (unlikely(!hmm_vma_walk->pgmap))
> + pgmap = get_dev_pagemap(pfn, pgmap);
>...
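The point of the change is to keep the dev_pagemap reference in a local
variable instead of in struct hmm_vma_walk. The resulting loop shape is
roughly the following (a sketch assembled from the quoted hunk; the
-EBUSY error code and the final put are assumptions from context):

struct dev_pagemap *pgmap = NULL;

for (i = 0; addr < end; addr += PAGE_SIZE, i++, pfn++) {
	if (pmd_devmap(pmd)) {
		/* Reuses the previous reference if pfn falls in the
		 * same pagemap, so at most one reference is held. */
		pgmap = get_dev_pagemap(pfn, pgmap);
		if (unlikely(!pgmap))
			return -EBUSY;
	}
	/* ... fill in pfns[i] for this page ... */
}
if (pgmap)
	put_dev_pagemap(pgmap);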
2007 Apr 18
1
[RFC, PATCH 19/24] i386 Vmi mmu changes
...t *dst, pgd_t *src, int count);
*
@@ -397,11 +414,26 @@ extern pte_t *lookup_address(unsigned lo
extern void noexec_setup(const char *str);
+#include <asm/pgalloc.h>
#if defined(CONFIG_HIGHPTE)
-#define pte_offset_map(dir, address) \
- ((pte_t *)kmap_atomic(pmd_page(*(dir)),KM_PTE0) + pte_index(address))
-#define pte_offset_map_nested(dir, address) \
- ((pte_t *)kmap_atomic(pmd_page(*(dir)),KM_PTE1) + pte_index(address))
+#define pte_offset_map(dir, address) \
+({ \
+ pte_t *__ptep; \
+ unsigned pfn = pmd_val(*dir) >> PAGE_SHIFT; \
+ __ptep = (pte_t *)kmap_atomi...
2009 Jun 05
1
[PATCH] lguest: PAE support
...if the PMD entry wasn't valid */
+ BUG_ON(!(pmd_flags(*pmd) & _PAGE_PRESENT));
+#else
pte_t *page = __va(pgd_pfn(spgd) << PAGE_SHIFT);
/* You should never call this if the PGD entry wasn't valid */
BUG_ON(!(pgd_flags(spgd) & _PAGE_PRESENT));
+#endif
+
return &page[pte_index(vaddr)];
}
@@ -101,10 +147,31 @@ static unsigned long gpgd_addr(struct lg_cpu *cpu, unsigned long vaddr)
return cpu->lg->pgdirs[cpu->cpu_pgd].gpgdir + index * sizeof(pgd_t);
}
-static unsigned long gpte_addr(pgd_t gpgd, unsigned long vaddr)
+#ifdef CONFIG_X86_PAE
+static unsigned l...
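With PAE enabled the shadow walk gains a level: spte_addr() must go
through a PMD page rather than straight from the PGD. A reconstructed
sketch of the function this hunk edits (spmd_addr and the exact
signature are assumptions from context, not the verbatim patch):

static pte_t *spte_addr(struct lg_cpu *cpu, pgd_t spgd, unsigned long vaddr)
{
#ifdef CONFIG_X86_PAE
	pmd_t *pmd = spmd_addr(cpu, spgd, vaddr);
	pte_t *page = __va(pmd_pfn(*pmd) << PAGE_SHIFT);

	/* You should never call this if the PMD entry wasn't valid */
	BUG_ON(!(pmd_flags(*pmd) & _PAGE_PRESENT));
#else
	pte_t *page = __va(pgd_pfn(spgd) << PAGE_SHIFT);

	/* You should never call this if the PGD entry wasn't valid */
	BUG_ON(!(pgd_flags(spgd) & _PAGE_PRESENT));
#endif
	/* Either way, index into the PTE page with the low bits of vaddr */
	return &page[pte_index(vaddr)];
}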
2019 Aug 06
0
[PATCH 04/15] mm: remove the pgmap field from struct hmm_vma_walk
...*hmm_vma_walk = walk->private;
struct hmm_range *range = hmm_vma_walk->range;
+ struct dev_pagemap *pgmap = NULL;
unsigned long pfn, npages, i;
bool fault, write_fault;
uint64_t cpu_flags;
@@ -490,17 +490,14 @@ static int hmm_vma_handle_pmd(struct mm_walk *walk,
pfn = pmd_pfn(pmd) + pte_index(addr);
for (i = 0; addr < end; addr += PAGE_SIZE, i++, pfn++) {
if (pmd_devmap(pmd)) {
- hmm_vma_walk->pgmap = get_dev_pagemap(pfn,
- hmm_vma_walk->pgmap);
- if (unlikely(!hmm_vma_walk->pgmap))
+ pgmap = get_dev_pagemap(pfn, pgmap);
+ if (unlikely(!pgmap))
re...
2019 Aug 07
0
[PATCH 04/15] mm: remove the pgmap field from struct hmm_vma_walk
...ange;
> > + struct dev_pagemap *pgmap = NULL;
> > unsigned long pfn, npages, i;
> > bool fault, write_fault;
> > uint64_t cpu_flags;
> > @@ -490,17 +490,14 @@ static int hmm_vma_handle_pmd(struct mm_walk *walk,
> > pfn = pmd_pfn(pmd) + pte_index(addr);
> > for (i = 0; addr < end; addr += PAGE_SIZE, i++, pfn++) {
> > if (pmd_devmap(pmd)) {
> > - hmm_vma_walk->pgmap = get_dev_pagemap(pfn,
> > - hmm_vma_walk->pgmap);
> > -...
2019 Aug 06
24
hmm cleanups, v2
Hi Jérôme, Ben, Felix and Jason,
below is a series against the hmm tree which cleans up various minor
bits and allows HMM_MIRROR to be built on all architectures.
Diffstat:
11 files changed, 94 insertions(+), 210 deletions(-)
A git tree is also available at:
git://git.infradead.org/users/hch/misc.git hmm-cleanups.2
Gitweb:
2020 Apr 28
0
[PATCH v3 22/75] x86/boot/compressed/64: Add set_page_en/decrypted() helpers
...m large to small mappings and we are
+ * also the only user of the page-table, so there is no chance
+ * of a TLB multihit.
+ */
+ pmd = __pmd((unsigned long)pte | info->kernpg_flag);
+ set_pmd(pmdp, pmd);
+ /* Flush TLB to establish the new PMD */
+ write_cr3(top_level_pgt);
+
+ return pte + pte_index(__address);
+}
+
+static void clflush_page(unsigned long address)
+{
+ unsigned int flush_size;
+ char *cl, *start, *end;
+
+ /*
+ * Hardcode cl-size to 64 - CPUID can't be used here because that might
+ * cause another #VC exception and the GHCB is not ready to use yet.
+ */
+ flush_size =...
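Given the comment, the rest of clflush_page() presumably walks the page
in 64-byte steps. A sketch of that tail (the mfence and the loop bounds
are my assumptions, not the verbatim patch):

flush_size = 64;
start      = (char *)(address & PAGE_MASK);
end        = start + PAGE_SIZE;

/* Make sure pending writes are globally visible before flushing */
asm volatile("mfence" : : : "memory");

for (cl = start; cl != end; cl += flush_size)
	clflush(cl);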
2006 Mar 14
12
[RFC] VMI for Xen?
I'm sure everyone has seen the drop of VMI patches for Linux at this
point, but just in case, the link is included below.
I've read this version of the VMI spec and have made my way through most
of the patches. While I wasn't really that impressed with the first
spec wrt Xen, the second version seems to be much more palatable.
Specifically, the code inlining and
2007 Apr 18
17
[patch 00/17] paravirt_ops updates
Hi Andi,
This series of patches updates paravirt_ops in various ways. Some of the
changes are plain cleanups and improvements, and some add some interfaces
necessary for Xen.
The brief overview:
add-MAINTAINERS.patch - obvious
remove-CONFIG_DEBUG_PARAVIRT.patch - no longer needed
paravirt-nop.patch - mark nop operations consistently
paravirt-pte-accessors.patch - operations to pack/unpack
2007 Apr 18
0
[RFC/PATCH PV_OPS X86_64 08/17] paravirt_ops - memory managment
...&= _PAGE_CHG_MASK;
- pte_val(pte) |= pgprot_val(newprot);
- pte_val(pte) &= __supported_pte_mask;
- return pte;
+ unsigned long pte = pte_val(pte_old);
+ pte &= _PAGE_CHG_MASK;
+ pte |= pgprot_val(newprot);
+ pte &= __supported_pte_mask;
+ return __pte(pte);
}
#define pte_index(address) \
Index: clean-start/include/asm-x86_64/tlbflush.h
===================================================================
--- clean-start.orig/include/asm-x86_64/tlbflush.h
+++ clean-start/include/asm-x86_64/tlbflush.h
@@ -4,6 +4,7 @@
#include <linux/mm.h>
#include <asm/processor.h...
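The rewrite is needed because with paravirt_ops pte_val() becomes a real
function, so pte_val(pte) can no longer be assigned to as an lvalue.
Assembled from the hunk above into a whole function (the pte_modify name
is inferred from the x86-64 header this diff touches):

static inline pte_t pte_modify(pte_t pte_old, pgprot_t newprot)
{
	unsigned long pte = pte_val(pte_old);

	pte &= _PAGE_CHG_MASK;		/* keep pfn plus dirty/accessed bits */
	pte |= pgprot_val(newprot);	/* apply the new protection bits */
	pte &= __supported_pte_mask;	/* drop unsupported bits (e.g. NX) */
	return __pte(pte);
}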
2007 Apr 18
23
[patch 00/20] paravirt_ops updates
Hi Andi,
Here's a repost of the paravirt_ops update series I posted the other day.
Since then, I found a few potential bugs with patching clobbering,
cleaned up and documented paravirt.h and the patching machinery.
Overview:
add-MAINTAINERS.patch
obvious
remove-CONFIG_DEBUG_PARAVIRT.patch
No longer meaningful or needed.
paravirt-nop.patch
Clean up nop paravirt_ops functions, mainly to