search for: pudp

Displaying 20 results from an estimated 103 matches for "pudp".

2017 Sep 04
0
[PATCH] x86/paravirt: remove no longer used paravirt functions
...-{ - if (sizeof(pmdval_t) > sizeof(long)) - /* 5 arg words */ - pv_mmu_ops.set_pmd_at(mm, addr, pmdp, pmd); - else - PVOP_VCALL4(pv_mmu_ops.set_pmd_at, mm, addr, pmdp, - native_pmd_val(pmd)); -} - -static inline void set_pud_at(struct mm_struct *mm, unsigned long addr, - pud_t *pudp, pud_t pud) -{ - if (sizeof(pudval_t) > sizeof(long)) - /* 5 arg words */ - pv_mmu_ops.set_pud_at(mm, addr, pudp, pud); - else - PVOP_VCALL4(pv_mmu_ops.set_pud_at, mm, addr, pudp, - native_pud_val(pud)); -} - static inline void set_pmd(pmd_t *pmdp, pmd_t pmd) { pmdval_t val = native...
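The wrapper removed above picks between a direct pv_mmu_ops call and a PVOP_VCALL4 depending on whether a pudval_t still fits in a long, and that test is resolved entirely at compile time. As a rough illustration only (a minimal user-space sketch, not kernel code, with made-up helper names), the dispatch pattern looks like this:

/*
 * Minimal user-space model of the sizeof-based dispatch used by the
 * removed set_pud_at() wrapper.  emulate_wide_call() and
 * emulate_narrow_call() are invented for illustration.
 */
#include <stdint.h>
#include <stdio.h>

typedef uint64_t pudval_t;      /* 64-bit entry value, as with PAE */

static void emulate_wide_call(pudval_t val)
{
    /* 32-bit case: the value needs two machine words ("5 arg words") */
    printf("wide call: lo=%#lx hi=%#lx\n",
           (unsigned long)(uint32_t)val, (unsigned long)(val >> 32));
}

static void emulate_narrow_call(pudval_t val)
{
    /* 64-bit case: the value fits in a single register argument */
    printf("narrow call: val=%#llx\n", (unsigned long long)val);
}

static void model_set_pud(pudval_t val)
{
    /* Decided at compile time: on 64-bit only the narrow path survives. */
    if (sizeof(pudval_t) > sizeof(long))
        emulate_wide_call(val);
    else
        emulate_narrow_call(val);
}

int main(void)
{
    model_set_pud(0x123456789abcdef0ULL);
    return 0;
}

On a 64-bit build the condition is constant-false, so the compiler keeps only the single-call path; once no callers of set_pud_at()/set_pmd_at() remained, the whole wrapper could be dropped, which is what the patch above does.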
2008 Jan 21
7
[PATCH 0/4] paravirt_ops-64 compile fixes
This series contains fixes to make the paravirt_ops code compile and boot on x86_64. It is a follow-up to the previous series from Glauber.
2019 Jul 30
0
[PATCH 10/13] mm: only define hmm_vma_walk_pud if needed
...ge->flags[HMM_PFN_WRITE] : - range->flags[HMM_PFN_VALID]; -} - static int hmm_vma_handle_pmd(struct mm_walk *walk, unsigned long addr, unsigned long end, @@ -700,10 +691,19 @@ static int hmm_vma_walk_pmd(pmd_t *pmdp, return 0; } -static int hmm_vma_walk_pud(pud_t *pudp, - unsigned long start, - unsigned long end, - struct mm_walk *walk) +#if defined(CONFIG_ARCH_HAS_PTE_DEVMAP) && \ + defined(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD) +static inline uint64_t pud_to_hmm_pfn_flags(struct hmm_range *range, pud_t pud) +{ + if (!pud_present...
2020 Aug 15
6
[PATCH v4 0/6] x86/paravirt: cleanup after 32-bit PV removal
A lot of cleanup after removal of 32-bit Xen PV guest support in paravirt code.

Changes in V4:
- dropped patches 1-3, as already committed
- addressed comments to V3
- added new patches 5+6

Changes in V3:
- addressed comments to V2
- split patch 1 into 2 patches
- new patches 3 and 7

Changes in V2:
- rebase to 5.8 kernel
- addressed comments to V1
- new patches 3 and 4

Juergen Gross (6):
2020 Apr 28
0
[PATCH v3 22/75] x86/boot/compressed/64: Add set_page_en/decrypted() helpers
...or (cl = start; cl != end; cl += flush_size) + clflush(cl); +} + +static int set_clr_page_flags(struct x86_mapping_info *info, + unsigned long address, + pteval_t set, pteval_t clr) +{ + unsigned long scratch, *target; + pgd_t *pgdp = (pgd_t *)top_level_pgt; + p4d_t *p4dp; + pud_t *pudp; + pmd_t *pmdp; + pte_t *ptep, pte; + + /* + * First make sure there is a PMD mapping for 'address'. + * It should already exist, but keep things generic. + * + * To map the page just read from it and fault it in if there is no + * mapping yet. add_identity_map() can't be called he...
2020 Nov 06
0
[PATCH v3 3/6] mm: support THP migration to device private memory
...NE_DEVICE have one extra reference */ if (is_zone_device_page(page)) { /* @@ -2833,13 +2908,191 @@ int migrate_vma_setup(struct migrate_vma *args) } EXPORT_SYMBOL(migrate_vma_setup); +static pmd_t *find_pmd(struct mm_struct *mm, unsigned long addr) +{ + pgd_t *pgdp; + p4d_t *p4dp; + pud_t *pudp; + + pgdp = pgd_offset(mm, addr); + p4dp = p4d_alloc(mm, pgdp, addr); + if (!p4dp) + return NULL; + pudp = pud_alloc(mm, p4dp, addr); + if (!pudp) + return NULL; + return pmd_alloc(mm, pudp, addr); +} + +#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION +/* + * This code closely follows: + * do_huge_pmd_an...
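The find_pmd() helper in the snippet above is the classic allocate-on-demand descent through the page-table levels: look up the pgd slot, then allocate the p4d, pud and pmd tables if they are missing, returning NULL as soon as an allocation fails. A standalone user-space sketch of that pattern follows (table geometry and index shifts are illustrative, not the kernel's):

/*
 * User-space model of the pgd -> p4d -> pud -> pmd descent with
 * allocate-on-demand semantics, loosely mirroring find_pmd() above.
 * Table sizes and shifts are invented for illustration.
 */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define TABLE_ENTRIES 512

struct table { struct table *entries[TABLE_ENTRIES]; };

/* Return the next-level table for this address, allocating it if absent. */
static struct table *walk_alloc(struct table *tbl, uint64_t addr, int shift)
{
    uint64_t idx = (addr >> shift) & (TABLE_ENTRIES - 1);

    if (!tbl->entries[idx]) {
        tbl->entries[idx] = calloc(1, sizeof(struct table));
        if (!tbl->entries[idx])
            return NULL;    /* mirrors p4d_alloc()/pud_alloc() failing */
    }
    return tbl->entries[idx];
}

/* Analogue of find_pmd(): descend three levels below the top-level table. */
static struct table *find_pmd_like(struct table *pgd, uint64_t addr)
{
    struct table *p4d, *pud;

    p4d = walk_alloc(pgd, addr, 39);
    if (!p4d)
        return NULL;
    pud = walk_alloc(p4d, addr, 30);
    if (!pud)
        return NULL;
    return walk_alloc(pud, addr, 21);
}

int main(void)
{
    static struct table pgd;    /* zero-initialized top-level table */
    struct table *pmd = find_pmd_like(&pgd, 0x7f1234567000ULL);

    printf("pmd table %s\n", pmd ? "allocated" : "allocation failed");
    return 0;   /* tables intentionally leaked; this is only a sketch */
}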
2020 Aug 07
0
[PATCH v3 4/7] x86/paravirt: remove 32-bit support from PARAVIRT_XXL
...t; sizeof(long)) - ret = PVOP_CALLEE2(pmdval_t, mmu.pmd_val, - pmd.pmd, (u64)pmd.pmd >> 32); - else - ret = PVOP_CALLEE1(pmdval_t, mmu.pmd_val, pmd.pmd); + ret = PVOP_CALLEE1(pmdval_t, mmu.pmd_val, pmd.pmd); return ret; } @@ -504,12 +463,9 @@ static inline void set_pud(pud_t *pudp, pud_t pud) { pudval_t val = native_pud_val(pud); - if (sizeof(pudval_t) > sizeof(long)) - PVOP_VCALL3(mmu.set_pud, pudp, val, (u64)val >> 32); - else - PVOP_VCALL2(mmu.set_pud, pudp, val); + PVOP_VCALL2(mmu.set_pud, pudp, val); } -#if CONFIG_PGTABLE_LEVELS >= 4 + static inline...
2020 Aug 15
0
[PATCH v4 1/6] x86/paravirt: remove 32-bit support from PARAVIRT_XXL
...izeof(pmdval_t) > sizeof(long)) - ret = PVOP_CALLEE2(pmdval_t, mmu.pmd_val, - pmd.pmd, (u64)pmd.pmd >> 32); - else - ret = PVOP_CALLEE1(pmdval_t, mmu.pmd_val, pmd.pmd); - - return ret; + return PVOP_CALLEE1(pmdval_t, mmu.pmd_val, pmd.pmd); } static inline void set_pud(pud_t *pudp, pud_t pud) { - pudval_t val = native_pud_val(pud); - - if (sizeof(pudval_t) > sizeof(long)) - PVOP_VCALL3(mmu.set_pud, pudp, val, (u64)val >> 32); - else - PVOP_VCALL2(mmu.set_pud, pudp, val); + PVOP_VCALL2(mmu.set_pud, pudp, native_pud_val(pud)); } -#if CONFIG_PGTABLE_LEVELS >= 4...
2019 Jul 15
5
[PATCH 0/2] Remove 32-bit Xen PV guest support
The long-term plan has been to replace Xen PV guests by PVH. The first victims of that plan are 32-bit PV guests, as those are only rarely used these days. Xen on x86 requires 64-bit support, and with Grub2 now officially supporting PVH since version 2.04 there is no need to keep 32-bit PV guest support alive in the Linux kernel. Additionally, Meltdown mitigation is not available in the
2020 Aug 07
4
[PATCH v3 0/7] Remove 32-bit Xen PV guest support
The long-term plan has been to replace Xen PV guests by PVH. The first victims of that plan are 32-bit PV guests, as those are only rarely used these days. Xen on x86 requires 64-bit support, and with Grub2 now officially supporting PVH since version 2.04 there is no need to keep 32-bit PV guest support alive in the Linux kernel. Additionally, Meltdown mitigation is not available in the
2019 Aug 06
0
[PATCH 04/15] mm: remove the pgmap field from struct hmm_vma_walk
...() so that + * we can leverage the get_dev_pagemap() optimization which will not + * re-take a reference on a pgmap if we already have one. + */ + if (pgmap) + put_dev_pagemap(pgmap); pte_unmap(ptep - 1); hmm_vma_walk->last = addr; @@ -714,6 +709,7 @@ static int hmm_vma_walk_pud(pud_t *pudp, struct hmm_vma_walk *hmm_vma_walk = walk->private; struct hmm_range *range = hmm_vma_walk->range; unsigned long addr = start, next; + struct dev_pagemap *pgmap = NULL; pmd_t *pmdp; pud_t pud; int ret; @@ -744,17 +740,14 @@ static int hmm_vma_walk_pud(pud_t *pudp, pfn = pud_p...
2020 Jul 01
5
[PATCH v2 0/4] Remove 32-bit Xen PV guest support
The long-term plan has been to replace Xen PV guests by PVH. The first victims of that plan are 32-bit PV guests, as those are only rarely used these days. Xen on x86 requires 64-bit support, and with Grub2 now officially supporting PVH since version 2.04 there is no need to keep 32-bit PV guest support alive in the Linux kernel. Additionally, Meltdown mitigation is not available in the
2007 Jul 06
2
[PATCH] I386: Deactivate the test for the dead CONFIG_DEBUG_PAGE_TYPE variable.
Robert P. J. Day wrote: > Signed-off-by: Robert P. J. Day <rpjday@mindspring.com> > > --- > > diff --git a/arch/i386/kernel/vmi.c b/arch/i386/kernel/vmi.c Maintainers are apparently those under "PARAVIRT_OPS INTERFACE". CCs added. > index c12720d..e3ce5c8 100644 > --- a/arch/i386/kernel/vmi.c > +++ b/arch/i386/kernel/vmi.c > @@ -235,7 +235,7 @@ static
2007 Apr 18
0
[RFC/PATCH PV_OPS X86_64 01/17] paravirt_ops - core changes
...u64 clonepfn, u64 start, u64 count); + void (*release_pt)(u64 pfn); + void (*release_pd)(u64 pfn); + + void (*set_pte)(pte_t *ptep, pte_t pteval); + void (*set_pte_at)(struct mm_struct *mm, u64 addr, pte_t *ptep, pte_t pteval); + void (*set_pmd)(pmd_t *pmdp, pmd_t pmdval); + void (*set_pud)(pud_t *pudp, pud_t pudval); + void (*set_pgd)(pgd_t *pgdp, pgd_t pgdval); + + void (*pte_update)(struct mm_struct *mm, u64 addr, pte_t *ptep); + void (*pte_update_defer)(struct mm_struct *mm, u64 addr, pte_t *ptep); + + pte_t (*ptep_get_and_clear)(struct mm_struct *mm, u64 addr, pte_t *ptep); + + void (*pte_cl...
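The structure excerpted above is the ops-table idiom paravirt is built on: a struct of function pointers filled with native handlers by default and repointed at boot when a hypervisor backend registers itself. A hedged user-space sketch of that idiom (the types and the hypervisor-flavoured handler below are invented, not the kernel's):

/*
 * User-space model of an ops table: function pointers default to the
 * native handlers and can be swapped for a hypervisor backend.
 */
#include <stdint.h>
#include <stdio.h>

typedef uint64_t pud_t;

struct mmu_ops {
    void (*set_pud)(pud_t *pudp, pud_t pud);
};

static void native_set_pud(pud_t *pudp, pud_t pud)
{
    *pudp = pud;                        /* plain memory write */
}

static void hyper_set_pud(pud_t *pudp, pud_t pud)
{
    /* a real backend would issue a hypercall here */
    printf("hypercall: update %p to %#llx\n",
           (void *)pudp, (unsigned long long)pud);
    *pudp = pud;
}

/* Boot-time choice: start native, switch when a hypervisor is detected. */
static struct mmu_ops mmu = { .set_pud = native_set_pud };

int main(void)
{
    pud_t entry = 0;

    mmu.set_pud(&entry, 0x1000);        /* native path */
    mmu.set_pud = hyper_set_pud;        /* "paravirtualised" path */
    mmu.set_pud(&entry, 0x2000);
    return 0;
}

The indirection costs one pointer call per operation, which is part of why the kernel additionally patches these call sites at runtime; the sketch leaves that out.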