Displaying 20 results from an estimated 62 matches for "pmd_mask".
2020 Jul 14
0
[PATCH v4 17/75] x86/boot/compressed/64: Change add_identity_map() to take start and end
..._end);
write_cr3(top_level_pgt);
}
@@ -189,7 +183,8 @@ static void do_pf_error(const char *msg, unsigned long error_code,
void do_boot_page_fault(struct pt_regs *regs, unsigned long error_code)
{
- unsigned long address = native_read_cr2();
+ unsigned long address = native_read_cr2() & PMD_MASK;
+ unsigned long end = address + PMD_SIZE;
/*
* Check for unexpected error codes. Unexpected are:
@@ -204,5 +199,5 @@ void do_boot_page_fault(struct pt_regs *regs, unsigned long error_code)
* Error code is sane - now identity map the 2M region around
* the faulting address.
*/
- add...
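A note on the arithmetic in this hunk: masking with PMD_MASK rounds the
faulting CR2 address down to a 2M boundary, so the handler always maps one
whole PMD-sized region. A minimal user-space sketch of that rounding, with
the constants defined locally for illustration rather than taken from
kernel headers, and a made-up fault address:

#include <stdio.h>

#define PMD_SHIFT	21
#define PMD_SIZE	(1UL << PMD_SHIFT)	/* 2 MiB */
#define PMD_MASK	(~(PMD_SIZE - 1))	/* clears the low 21 bits */

int main(void)
{
	unsigned long fault = 0x1234567UL;		/* hypothetical CR2 value */
	unsigned long address = fault & PMD_MASK;	/* rounds down to 0x1200000 */
	unsigned long end = address + PMD_SIZE;		/* 0x1400000 */

	printf("fault=%#lx -> map [%#lx, %#lx)\n", fault, address, end);
	return 0;
}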
2020 Apr 28
0
[PATCH v3 24/75] x86/boot/compressed/64: Unmap GHCB page before booting the kernel
...apping_info, address, _PAGE_ENC, 0);
}
+int set_page_non_present(unsigned long address)
+{
+ return set_clr_page_flags(&mapping_info, address, 0, _PAGE_PRESENT);
+}
+
void do_boot_page_fault(struct pt_regs *regs, unsigned long error_code)
{
- unsigned long address = native_read_cr2() & PMD_MASK;
- unsigned long end = address + PMD_SIZE;
+ unsigned long address = native_read_cr2();
+ unsigned long end;
+ bool ghcb_fault;
+
+ ghcb_fault = sev_es_check_ghcb_fault(address);
+
+ address &= PMD_MASK;
+ end = address + PMD_SIZE;
/*
* Check for unexpected error codes. Unexpect...
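The helper added here follows a set-mask/clear-mask pattern: one routine
applies both masks, and thin wrappers pass 0 for whichever side they do not
need. A hedged sketch of that pattern with stand-in types and flag values
(the MY_ names and bit positions are illustrative, not the kernel's):

typedef unsigned long pteval_t;

#define MY_PAGE_PRESENT	(1UL << 0)
#define MY_PAGE_ENC	(1UL << 51)	/* illustrative encryption bit */

static pteval_t apply_flags(pteval_t pte, pteval_t set, pteval_t clr)
{
	pte |= set;	/* turn the requested bits on */
	pte &= ~clr;	/* then turn the requested bits off */
	return pte;
}

/* Mirrors the two call sites above: encrypt vs. unmap. */
static pteval_t page_encrypted(pteval_t pte)   { return apply_flags(pte, MY_PAGE_ENC, 0); }
static pteval_t page_non_present(pteval_t pte) { return apply_flags(pte, 0, MY_PAGE_PRESENT); }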
2020 Apr 28
0
[PATCH v3 22/75] x86/boot/compressed/64: Add set_page_en/decrypted() helpers
..._pmd(struct x86_mapping_info *info,
+ pmd_t *pmdp, unsigned long __address)
+{
+ unsigned long page_flags;
+ unsigned long address;
+ pte_t *pte;
+ pmd_t pmd;
+ int i;
+
+ pte = (pte_t *)info->alloc_pgt_page(info->context);
+ if (!pte)
+ return NULL;
+
+ address = __address & PMD_MASK;
+ /* No large page - clear PSE flag */
+ page_flags = info->page_flag & ~_PAGE_PSE;
+
+ /* Populate the PTEs */
+ for (i = 0; i < PTRS_PER_PMD; i++) {
+ set_pte(&pte[i], __pte(address | page_flags));
+ address += PAGE_SIZE;
+ }
+
+ /*
+ * Ideally we need to clear the large PMD fi...
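What this helper does is split a 2M large-page mapping into 512 individual
4K PTEs carrying the same flags minus the PSE (large-page) bit, so the
flags of a single page can then be changed. A sketch of the population
loop under the usual x86-64 assumptions (4K pages, 512 PTEs per PMD),
with every name defined locally as a stand-in:

#define MY_PAGE_SIZE	(1UL << 12)
#define MY_PTRS_PER_PMD	512
#define MY_PMD_MASK	(~(MY_PTRS_PER_PMD * MY_PAGE_SIZE - 1))
#define MY_PAGE_PSE	(1UL << 7)

static void fill_ptes(unsigned long *pte, unsigned long addr,
		      unsigned long large_flags)
{
	/* 4K entries must not carry the large-page bit. */
	unsigned long flags = large_flags & ~MY_PAGE_PSE;
	int i;

	addr &= MY_PMD_MASK;	/* start of the 2M region being split */
	for (i = 0; i < MY_PTRS_PER_PMD; i++) {
		pte[i] = addr | flags;	/* each entry maps one 4K page */
		addr += MY_PAGE_SIZE;
	}
}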
2019 Aug 06
0
[PATCH 09/15] mm: don't abuse pte_index() in hmm_vma_handle_pmd
...44
--- a/mm/hmm.c
+++ b/mm/hmm.c
@@ -486,7 +486,7 @@ static int hmm_vma_handle_pmd(struct mm_walk *walk,
if (pmd_protnone(pmd) || fault || write_fault)
return hmm_vma_walk_hole_(addr, end, fault, write_fault, walk);
- pfn = pmd_pfn(pmd) + pte_index(addr);
+ pfn = pmd_pfn(pmd) + ((addr & ~PMD_MASK) >> PAGE_SHIFT);
for (i = 0; addr < end; addr += PAGE_SIZE, i++, pfn++) {
if (pmd_devmap(pmd)) {
pgmap = get_dev_pagemap(pfn, pgmap);
--
2.20.1
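The replacement expression computes the page's index within its 2M region
directly: addr & ~PMD_MASK keeps the low 21 bits (the byte offset into the
PMD), and shifting by PAGE_SHIFT turns that offset into a page count,
which is then added to the PMD's base pfn. A worked check of that
arithmetic with local stand-in constants and a made-up address:

#include <assert.h>

#define MY_PAGE_SHIFT	12
#define MY_PMD_SHIFT	21
#define MY_PMD_MASK	(~((1UL << MY_PMD_SHIFT) - 1))

int main(void)
{
	/* Hypothetical address: byte offset 0x3000 into some 2M region. */
	unsigned long addr = 0x1203000UL;
	unsigned long idx = (addr & ~MY_PMD_MASK) >> MY_PAGE_SHIFT;

	assert(idx == 3);	/* the 4th 4K page of the region */
	return 0;
}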
2020 Jun 19
0
[PATCH 13/16] mm: support THP migration to device private memory
...<asm/pgalloc.h>
#define CREATE_TRACE_POINTS
#include <trace/events/migrate.h>
@@ -2185,6 +2186,8 @@ static int migrate_vma_collect_hole(unsigned long start,
for (addr = start; addr < end; addr += PAGE_SIZE) {
migrate->src[migrate->npages] = flags;
+ if ((addr & ~PMD_MASK) == 0 && (end & ~PMD_MASK) == 0)
+ migrate->src[migrate->npages] |= MIGRATE_PFN_COMPOUND;
migrate->dst[migrate->npages] = 0;
migrate->npages++;
migrate->cpages++;
@@ -2219,48 +2222,87 @@ static int migrate_vma_collect_pmd(pmd_t *pmdp,
unsigned long addr =...
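The (addr & ~PMD_MASK) == 0 test in this hunk is a PMD-alignment check: a
hole is recorded as a compound (THP-sized) migration entry only when both
endpoints sit on a 2M boundary. A small sketch of the test, with the
constants defined locally for illustration:

#define MY_PMD_SIZE	(1UL << 21)
#define MY_PMD_MASK	(~(MY_PMD_SIZE - 1))

/* True iff x sits on a 2M boundary (no low-order remainder). */
static inline int pmd_aligned(unsigned long x)
{
	return (x & ~MY_PMD_MASK) == 0;
}

/*
 * A [start, end) range qualifies for a compound entry only when
 * pmd_aligned(start) && pmd_aligned(end), matching the hunk above.
 */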
2020 Jul 14
0
[PATCH v4 14/75] x86/boot/compressed/64: Add page-fault handler
...set
+ */
+ if (error_code & (X86_PF_PROT | X86_PF_USER | X86_PF_RSVD))
+ do_pf_error("Unexpected page-fault:", error_code, address, regs->ip);
+
+ /*
+ * Error code is sane - now identity map the 2M region around
+ * the faulting address.
+ */
+ add_identity_map(address & PMD_MASK, PMD_SIZE);
+}
diff --git a/arch/x86/boot/compressed/idt_64.c b/arch/x86/boot/compressed/idt_64.c
index 082cd6bca033..5f083092a86d 100644
--- a/arch/x86/boot/compressed/idt_64.c
+++ b/arch/x86/boot/compressed/idt_64.c
@@ -40,5 +40,7 @@ void load_stage2_idt(void)
{
boot_idt_desc.address = (unsign...
2020 Apr 02
0
[PATCH 14/70] x86/boot/compressed/64: Add page-fault handler
...s set
> + */
> + if (error_code & (X86_PF_PROT | X86_PF_USER | X86_PF_RSVD))
> + pf_error(error_code, address, regs);
> +
> + /*
> + * Error code is sane - now identity map the 2M region around
> + * the faulting address.
> + */
> + add_identity_map(address & PMD_MASK, PMD_SIZE);
> +}
> diff --git a/arch/x86/boot/compressed/idt_64.c b/arch/x86/boot/compressed/idt_64.c
> index 46ecea671b90..84ba57d9d436 100644
> --- a/arch/x86/boot/compressed/idt_64.c
> +++ b/arch/x86/boot/compressed/idt_64.c
> @@ -39,5 +39,7 @@ void load_stage2_idt(void)
>...
2020 Nov 06
0
[PATCH v3 3/6] mm: support THP migration to device private memory
...urn 0;
+ if (!vma_is_anonymous(walk->vma) ||
+ !((migrate->flags & MIGRATE_VMA_SELECT_SYSTEM)))
+ return migrate_vma_collect_skip(start, end, walk);
+
+ if (thp_migration_supported() &&
+ (migrate->flags & MIGRATE_VMA_SELECT_COMPOUND) &&
+ (start & ~PMD_MASK) == 0 && (end & ~PMD_MASK) == 0) {
+ migrate->src[migrate->npages] = MIGRATE_PFN_MIGRATE |
+ MIGRATE_PFN_COMPOUND;
+ migrate->dst[migrate->npages] = 0;
+ migrate->npages++;
+ migrate->cpages++;
+ return migrate_vma_collect_skip(start + PAGE_SIZE, end, walk);...
2020 Apr 22
0
[PATCH hmm 2/5] mm/hmm: make hmm_range_fault return 0 or -1
...if (required_fault)
return hmm_vma_fault(addr, end, required_fault, walk);
- hmm_vma_walk->last = addr;
return hmm_pfns_fill(addr, end, range, HMM_PFN_NONE);
}
@@ -207,7 +206,6 @@ static int hmm_vma_handle_pmd(struct mm_walk *walk, unsigned long addr,
pfn = pmd_pfn(pmd) + ((addr & ~PMD_MASK) >> PAGE_SHIFT);
for (i = 0; addr < end; addr += PAGE_SIZE, i++, pfn++)
pfns[i] = hmm_device_entry_from_pfn(range, pfn) | cpu_flags;
- hmm_vma_walk->last = end;
return 0;
}
#else /* CONFIG_TRANSPARENT_HUGEPAGE */
@@ -386,13 +384,10 @@ static int hmm_vma_walk_pmd(pmd_t *pmdp,...
2020 May 01
0
[PATCH hmm v2 2/5] mm/hmm: make hmm_range_fault return 0 or -1
...if (required_fault)
return hmm_vma_fault(addr, end, required_fault, walk);
- hmm_vma_walk->last = addr;
return hmm_pfns_fill(addr, end, range, HMM_PFN_NONE);
}
@@ -207,7 +206,6 @@ static int hmm_vma_handle_pmd(struct mm_walk *walk, unsigned long addr,
pfn = pmd_pfn(pmd) + ((addr & ~PMD_MASK) >> PAGE_SHIFT);
for (i = 0; addr < end; addr += PAGE_SIZE, i++, pfn++)
pfns[i] = hmm_device_entry_from_pfn(range, pfn) | cpu_flags;
- hmm_vma_walk->last = end;
return 0;
}
#else /* CONFIG_TRANSPARENT_HUGEPAGE */
@@ -386,13 +384,10 @@ static int hmm_vma_walk_pmd(pmd_t *pmdp,...
2020 Jun 21
2
[PATCH 13/16] mm: support THP migration to device private memory
...fine CREATE_TRACE_POINTS
> #include <trace/events/migrate.h>
> @@ -2185,6 +2186,8 @@ static int migrate_vma_collect_hole(unsigned long start,
>
> for (addr = start; addr < end; addr += PAGE_SIZE) {
> migrate->src[migrate->npages] = flags;
> + if ((addr & ~PMD_MASK) == 0 && (end & ~PMD_MASK) == 0)
> + migrate->src[migrate->npages] |= MIGRATE_PFN_COMPOUND;
> migrate->dst[migrate->npages] = 0;
> migrate->npages++;
> migrate->cpages++;
> @@ -2219,48 +2222,87 @@ static int migrate_vma_collect_pmd(pmd_t *pmdp...
2020 Nov 06
12
[PATCH v3 0/6] mm/hmm/nouveau: add THP migration to migrate_vma_*
This series adds support for transparent huge page migration to
migrate_vma_*() and adds nouveau SVM and HMM selftests as consumers.
Earlier versions were posted [1] and [2].
The patches apply cleanly to the linux-mm 5.10.0-rc2 tree. There are a
lot of other THP patches being posted. I don't think there are any
semantic conflicts, but there may be some merge conflicts depending on
2019 Aug 06
24
hmm cleanups, v2
Hi Jérôme, Ben, Felix and Jason,
below is a series against the hmm tree which cleans up various minor
bits and allows HMM_MIRROR to be built on all architectures.
Diffstat:
11 files changed, 94 insertions(+), 210 deletions(-)
A git tree is also available at:
git://git.infradead.org/users/hch/misc.git hmm-cleanups.2
Gitweb:
2020 Sep 02
10
[PATCH v2 0/7] mm/hmm/nouveau: add THP migration to migrate_vma_*
This series adds support for transparent huge page migration to
migrate_vma_*() and adds nouveau SVM and HMM selftests as consumers.
An earlier version was posted [1]. This version now
supports splitting a THP midway through the migration process, which
led to a number of changes.
The patches apply cleanly to the current linux-mm tree. Since there
are a couple of patches in linux-mm from Dan
2020 Jun 19
22
[PATCH 00/16] mm/hmm/nouveau: THP mapping and migration
These patches apply to linux-5.8.0-rc1. Patches 1-3 should probably go
into 5.8, the others can be queued for 5.9. Patches 4-6 improve the HMM
selftests. Patches 7-8 prepare nouveau for the meat of this series, which
adds support and testing for compound page mapping of system memory
(patches 9-11) and compound page migration to device private memory
(patches 12-16). Since these changes are split
2020 Apr 22
11
[PATCH hmm 0/5] Adjust hmm_range_fault() API
From: Jason Gunthorpe <jgg at mellanox.com>
The API is a bit complicated for the uses we actually have, and
discussions about simplifying it have come up a number of times.
This small series removes the customizable pfn format and simplifies the
return code of hmm_range_fault().
All the drivers are adjusted to process the simplified format.
I would appreciate tested-by's for the two
2020 Apr 22
0
[PATCH hmm 5/5] mm/hmm: remove the customizable pfn format from hmm_range_fault
...to_hmm_pfn_flags(range, pmd);
required_fault =
- hmm_range_need_fault(hmm_vma_walk, pfns, npages, cpu_flags);
+ hmm_range_need_fault(hmm_vma_walk, hmm_pfns, npages, cpu_flags);
if (required_fault)
return hmm_vma_fault(addr, end, required_fault, walk);
pfn = pmd_pfn(pmd) + ((addr & ~PMD_MASK) >> PAGE_SHIFT);
for (i = 0; addr < end; addr += PAGE_SIZE, i++, pfn++)
- pfns[i] = hmm_device_entry_from_pfn(range, pfn) | cpu_flags;
+ hmm_pfns[i] = pfn | cpu_flags;
return 0;
}
#else /* CONFIG_TRANSPARENT_HUGEPAGE */
/* stub to allow the code below to compile */
int hmm_vma_ha...
2020 May 01
0
[PATCH hmm v2 5/5] mm/hmm: remove the customizable pfn format from hmm_range_fault
...to_hmm_pfn_flags(range, pmd);
required_fault =
- hmm_range_need_fault(hmm_vma_walk, pfns, npages, cpu_flags);
+ hmm_range_need_fault(hmm_vma_walk, hmm_pfns, npages, cpu_flags);
if (required_fault)
return hmm_vma_fault(addr, end, required_fault, walk);
pfn = pmd_pfn(pmd) + ((addr & ~PMD_MASK) >> PAGE_SHIFT);
for (i = 0; addr < end; addr += PAGE_SIZE, i++, pfn++)
- pfns[i] = hmm_device_entry_from_pfn(range, pfn) | cpu_flags;
+ hmm_pfns[i] = pfn | cpu_flags;
return 0;
}
#else /* CONFIG_TRANSPARENT_HUGEPAGE */
/* stub to allow the code below to compile */
int hmm_vma_ha...
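The simplified format packs flags and pfn into one word: the pfn occupies
the low bits and the flag bits sit above any bit a physical pfn can reach,
so "pfn | cpu_flags" is lossless. A sketch with illustrative flag
positions (stand-ins, not the kernel's HMM_PFN_* definitions):

#define MY_PFN_VALID	(1UL << 63)
#define MY_PFN_WRITE	(1UL << 62)
#define MY_PFN_FLAGS	(MY_PFN_VALID | MY_PFN_WRITE)

static unsigned long pack(unsigned long pfn, unsigned long flags)
{
	return pfn | flags;		/* flags live above any real pfn */
}

static unsigned long unpack_pfn(unsigned long v)
{
	return v & ~MY_PFN_FLAGS;	/* strip the flag bits back off */
}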
2020 May 01
13
[PATCH hmm v2 0/5] Adjust hmm_range_fault() API
From: Jason Gunthorpe <jgg at mellanox.com>
The API is a bit complicated for the uses we actually have, and
discussions about simplifying it have come up a number of times.
This small series removes the customizable pfn format and simplifies the
return code of hmm_range_fault().
All the drivers are adjusted to process the simplified format.
I would appreciate tested-by's for the two
2020 Apr 22
1
[PATCH hmm 5/5] mm/hmm: remove the customizable pfn format from hmm_range_fault
...required_fault =
> - hmm_range_need_fault(hmm_vma_walk, pfns, npages, cpu_flags);
> + hmm_range_need_fault(hmm_vma_walk, hmm_pfns, npages, cpu_flags);
> if (required_fault)
> return hmm_vma_fault(addr, end, required_fault, walk);
>
> pfn = pmd_pfn(pmd) + ((addr & ~PMD_MASK) >> PAGE_SHIFT);
> for (i = 0; addr < end; addr += PAGE_SIZE, i++, pfn++)
> - pfns[i] = hmm_device_entry_from_pfn(range, pfn) | cpu_flags;
> + hmm_pfns[i] = pfn | cpu_flags;
> return 0;
> }
> #else /* CONFIG_TRANSPARENT_HUGEPAGE */
> /* stub to allow the code...