search for: set_pmd

Displaying 12 results from an estimated 172 matches for "set_pmd".

2009 Sep 21
1
[PATCH 2/5] lguest: use set_pte/set_pmd uniformly for real page table entries
If we're building a pte, we can use simple assignment; only use set_pte etc. when we're actually going to use that destination as a PTE. I don't know that we'll ever run under Xen, but it's neater. And use set_pte/set_pmd rather than assuming native_ versions, even though that's probably true for most people. (Includes compile fix by Kamalesh Babulal <kamalesh at linux.vnet.ibm.com>) Signed-off-by: Rusty Russell <rusty at rustcorp.com.au> Cc: Matias Zabaljauregui <zabaljauregui at gmail.com> C...
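The rule the patch states can be reduced to a minimal, standalone sketch; pte_t and set_pte below are stand-ins, not the lguest definitions:

    typedef struct { unsigned long pte; } pte_t;

    /* Accessor for a live page-table slot: under a paravirt hypervisor
     * this is where a trap or hypercall would let the host validate the
     * entry; natively it is just a store. */
    static inline void set_pte(pte_t *ptep, pte_t pte)
    {
            *ptep = pte;
    }

    static void install(pte_t *live_slot, unsigned long pfn)
    {
            pte_t tmp;

            tmp.pte = (pfn << 12) | 0x1; /* building a value: plain assignment */
            set_pte(live_slot, tmp);     /* writing the real slot: use the accessor */
    }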
2007 Apr 18
1
[PATCH 1/5] Add pagetable allocation notifiers
...24 09:31:05.000000000 -0700 +++ linux-2.6.13/arch/i386/mm/init.c 2005-08-24 09:31:31.000000000 -0700 @@ -79,6 +79,7 @@ static pte_t * __init one_page_table_ini { if (pmd_none(*pmd)) { pte_t *page_table = (pte_t *) alloc_bootmem_low_pages(PAGE_SIZE); + SetPagePTE(virt_to_page(page_table)); set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE)); if (page_table != pte_offset_kernel(pmd, 0)) BUG(); Index: linux-2.6.13/arch/i386/mm/pageattr.c =================================================================== --- linux-2.6.13.orig/arch/i386/mm/pageattr.c 2005-08-24 09:31:05.000000000 -0700...
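The ordering in the hunk is the point: the page is flagged as a page table before it is wired into the pmd, so a hypervisor backend can start tracking it while it is still invisible to the MMU. A hedged, standalone model of that ordering (SetPagePTE and the flag bit are stand-ins here, not the kernel definitions):

    struct page { unsigned long flags; };
    #define PG_pte (1UL << 0)

    static void SetPagePTE(struct page *pg)
    {
            pg->flags |= PG_pte;         /* a backend may now shadow/protect it */
    }

    static void one_page_table_init(struct page *pt_page,
                                    unsigned long *pmd_slot, unsigned long pt_phys)
    {
            SetPagePTE(pt_page);         /* notify first... */
            *pmd_slot = pt_phys | 0x63;  /* ...then install (_PAGE_TABLE-like bits) */
    }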
2020 Aug 15
0
[PATCH v4 5/6] x86/paravirt: remove set_pte_at pv-op
...+412,6 @@ static inline void set_pte(pte_t *ptep, pte_t pte) PVOP_VCALL2(mmu.set_pte, ptep, pte.pte); } -static inline void set_pte_at(struct mm_struct *mm, unsigned long addr, - pte_t *ptep, pte_t pte) -{ - PVOP_VCALL4(mmu.set_pte_at, mm, addr, ptep, pte.pte); -} - static inline void set_pmd(pmd_t *pmdp, pmd_t pmd) { PVOP_VCALL2(mmu.set_pmd, pmdp, native_pmd_val(pmd)); @@ -510,7 +504,7 @@ static inline void set_pte_atomic(pte_t *ptep, pte_t pte) static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep) { - set_pte_at(mm, addr, ptep, __pte(0)); +...
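With the pv-op removed there is no per-hypervisor behaviour left in set_pte_at(), so it can collapse into a plain wrapper around set_pte(). A sketch of the resulting shape with stub types (the exact post-patch kernel definition may differ):

    typedef struct { unsigned long pte; } pte_t;
    struct mm_struct;                    /* opaque for this sketch */

    static inline void set_pte(pte_t *ptep, pte_t pte)
    {
            *ptep = pte;
    }

    /* mm and addr survive only for the generic interface; no pv hook
     * consumes them anymore, which is what made the pv-op droppable. */
    static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
                                  pte_t *ptep, pte_t pte)
    {
            (void)mm;
            (void)addr;
            set_pte(ptep, pte);
    }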
2007 Apr 18
0
[PATCH 1/5] Paravirt page alloc.patch
..., .flush_tlb_single = native_flush_tlb_single, + .alloc_pt = (void *)native_nop, + .alloc_pd = (void *)native_nop, + .alloc_pd_clone = (void *)native_nop, + .release_pt = (void *)native_nop, + .release_pd = (void *)native_nop, + .set_pte = native_set_pte, .set_pte_at = native_set_pte_at, .set_pmd = native_set_pmd, =================================================================== --- a/arch/i386/mm/init.c +++ b/arch/i386/mm/init.c @@ -62,6 +62,7 @@ static pmd_t * __init one_md_table_init( #ifdef CONFIG_X86_PAE pmd_table = (pmd_t *) alloc_bootmem_low_pages(PAGE_SIZE); + paravirt_allo...
2007 Apr 18
0
[PATCH 1/6] Page allocation hooks for VMI backend
..., .flush_tlb_single = native_flush_tlb_single, + .alloc_pt = (void *)native_nop, + .alloc_pd = (void *)native_nop, + .alloc_pd_clone = (void *)native_nop, + .release_pt = (void *)native_nop, + .release_pd = (void *)native_nop, + .set_pte = native_set_pte, .set_pte_at = native_set_pte_at, .set_pmd = native_set_pmd, =================================================================== --- a/arch/i386/mm/init.c +++ b/arch/i386/mm/init.c @@ -62,6 +62,7 @@ static pmd_t * __init one_md_table_init( #ifdef CONFIG_X86_PAE pmd_table = (pmd_t *) alloc_bootmem_low_pages(PAGE_SIZE); + paravirt_allo...
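Both entries above add the same hunk: allocation/release hooks for page-table pages whose native implementations do nothing. A toy model of the ops-table pattern (names illustrative, not the kernel struct):

    struct pv_page_ops {
            void (*alloc_pt)(unsigned long pfn);
            void (*release_pt)(unsigned long pfn);
    };

    static void native_nop(unsigned long pfn)
    {
            (void)pfn;                   /* native kernels: nothing to track */
    }

    /* A backend such as VMI or Xen swaps these pointers in order to
     * shadow or pin page-table pages as they come and go. */
    static struct pv_page_ops pv_ops = {
            .alloc_pt   = native_nop,
            .release_pt = native_nop,
    };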
2020 Aug 15
6
[PATCH v4 0/6] x86/paravirt: cleanup after 32-bit PV removal
A lot of cleanup after removal of 32-bit Xen PV guest support in paravirt code. Changes in V4: - dropped patches 1-3, as already committed - addressed comments to V3 - added new patches 5+6 Changes in V3: - addressed comments to V2 - split patch 1 into 2 patches - new patches 3 and 7 Changes in V2: - rebase to 5.8 kernel - addressed comments to V1 - new patches 3 and 4 Juergen Gross (6):
2007 Apr 18
1
[RFC, PATCH 19/24] i386 Vmi mmu changes
...mm/fault.c =================================================================== --- linux-2.6.16-rc5.orig/arch/i386/mm/fault.c 2006-03-10 12:55:05.000000000 -0800 +++ linux-2.6.16-rc5/arch/i386/mm/fault.c 2006-03-10 15:57:08.000000000 -0800 @@ -552,6 +552,13 @@ vmalloc_fault: goto no_context; set_pmd(pmd, *pmd_k); + /* + * Needed. We have just updated this root with a copy of + * the kernel pmd. To return without flushing would + * introduce a fault loop. + */ + update_mmu_cache(NULL, pmd, pmd_k->pmd); + pte_k = pte_offset_kernel(pmd_k, address); if (!pte_present(*pte_k))...
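The added comment is the crux: set_pmd() has fixed the task's tables, but the hypervisor's view must be refreshed too, or the same fault is taken again on return, forever. Paraphrased with stand-in types (not the VMI code itself):

    typedef struct { unsigned long pmd; } pmd_t;

    static inline void set_pmd(pmd_t *dst, pmd_t val)
    {
            *dst = val;
    }

    static void update_mmu_cache(void *vma, pmd_t *pmdp, unsigned long val)
    {
            (void)vma; (void)pmdp; (void)val;  /* hook point for VMI et al. */
    }

    static void sync_vmalloc_pmd(pmd_t *pmd, pmd_t *pmd_k)
    {
            set_pmd(pmd, *pmd_k);                    /* copy master kernel entry */
            update_mmu_cache(NULL, pmd, pmd_k->pmd); /* flush, or fault-loop */
    }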
2008 May 31
1
[PATCH] xen: avoid hypercalls when updating unpinned pud/pmd
...deletions(-) =================================================================== --- a/arch/x86/xen/enlighten.c +++ b/arch/x86/xen/enlighten.c @@ -903,6 +903,14 @@ pin_pagetable_pfn(MMUEXT_PIN_L3_TABLE, PFN_DOWN(__pa(base))); } +static __init void xen_post_allocator_init(void) +{ + pv_mmu_ops.set_pmd = xen_set_pmd; + pv_mmu_ops.set_pud = xen_set_pud; + + xen_mark_init_mm_pinned(); +} + /* This is called once we have the cpu_possible_map */ void xen_setup_vcpu_info_placement(void) { @@ -990,7 +998,7 @@ .banner = xen_banner, .memory_setup = xen_memory_setup, .arch_setup = xen_arch_setup,...
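The pattern here is late pv-op switching: boot with a conservative implementation, then, once the allocator is up and init_mm is marked pinned, install the optimized one (the real xen_set_pmd checks whether the table is pinned and skips the hypercall if not). A toy model, with illustrative names:

    typedef struct { unsigned long pmd; } pmd_t;

    struct mmu_ops {
            void (*set_pmd)(pmd_t *pmdp, pmd_t val);
    };

    static void set_pmd_early(pmd_t *pmdp, pmd_t val)
    {
            *pmdp = val;  /* stand-in: always go through the hypervisor */
    }

    static void set_pmd_smart(pmd_t *pmdp, pmd_t val)
    {
            *pmdp = val;  /* stand-in: direct write when the table is unpinned */
    }

    static struct mmu_ops pv_mmu_ops = { .set_pmd = set_pmd_early };

    static void post_allocator_init(void)
    {
            pv_mmu_ops.set_pmd = set_pmd_smart;  /* cheaper path from here on */
    }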
2007 Apr 18
2
[PATCH 1/4] Pte drop ptep_get_and_clear paravirt op.patch
...ong long val = PVOP_CALL1(unsigned long long, ptep_get_and_clear, p); - return (pte_t) { val, val >> 32 }; -} #else /* !CONFIG_X86_PAE */ + static inline pte_t __pte(unsigned long val) { return (pte_t) { PVOP_CALL1(unsigned long, make_pte, val) }; @@ -899,11 +893,6 @@ static inline void set_pmd(pmd_t *pmdp, static inline void set_pmd(pmd_t *pmdp, pmd_t pmdval) { PVOP_VCALL2(set_pmd, pmdp, pmdval.pud.pgd.pgd); -} - -static inline pte_t raw_ptep_get_and_clear(pte_t *p) -{ - return (pte_t) { PVOP_CALL1(unsigned long, ptep_get_and_clear, p) }; } #endif /* CONFIG_X86_PAE */ diff -r c0...
2007 Apr 18
2
pgd_alloc and [cd]tors
Is there any real use in having a ctor/dtor for the pgd cache? Given that all pgd allocation happens via pgd_alloc/pgd_free, why not just fold the [cd]tor in? I'm asking because Xen wants pgd[3] to be unshared in the PAE case, and it looks to me like the easiest way to handle that is by making pgd_alloc/free pv-ops and doing the appropriate thing in the Xen code. Would need to sort out the
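The folding being asked about could take roughly this shape; purely a sketch of the interface under discussion, not what was eventually merged:

    typedef unsigned long pgd_t;

    /* If pgd_alloc/pgd_free become pv-ops, the ctor/dtor work moves
     * inside them, and a backend (e.g. Xen wanting pgd[3] unshared
     * under PAE) can override the whole allocation path. Illustrative
     * names only. */
    struct pv_pgd_ops {
            pgd_t *(*pgd_alloc)(void);
            void   (*pgd_free)(pgd_t *pgd);
    };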
2007 Apr 18
0
[RFC/PATCH PV_OPS X86_64 08/17] paravirt_ops - memory management
...(%016lx).\n", __FILE__, __LINE__, &(e), pgd_val(e)) - -#define pgd_none(x) (!pgd_val(x)) -#define pud_none(x) (!pud_val(x)) +#ifdef CONFIG_PARAVIRT +#include <asm/paravirt.h> +#else +#define set_pte native_set_pte +#define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval) +#define set_pmd native_set_pmd +#define set_pud native_set_pud +#define set_pgd native_set_pgd +#define pte_clear(mm,addr,xp) do { set_pte_at(mm, addr, xp, __pte(0)); } while (0) +#define pmd_clear(xp) do { set_pmd(xp, __pmd(0)); } while (0) +#define pud_clear native_pud_clear +#define pgd_clear native_pgd_clear +...
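Reduced to its shape, the fallback branch in the hunk makes each accessor an alias for its native helper when CONFIG_PARAVIRT is off, so native kernels pay no indirection. A standalone sketch (native_set_pmd and __pmd are stand-ins):

    typedef struct { unsigned long pmd; } pmd_t;
    #define __pmd(x) ((pmd_t){ (x) })

    static inline void native_set_pmd(pmd_t *pmdp, pmd_t v)
    {
            *pmdp = v;
    }

    #ifdef CONFIG_PARAVIRT
    /* indirect through the pv-ops table (omitted in this sketch) */
    #else
    #define set_pmd(pmdp, v)  native_set_pmd(pmdp, v)
    #define pmd_clear(pmdp)   set_pmd(pmdp, __pmd(0))
    #endif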
2007 Feb 14
2
[PATCH 8/8] 2.6.17: scan DMI early
...id *early_ioremap(unsigned long addr, unsigned long size) { - return ioremap(addr, size); + unsigned long map = round_down(addr, LARGE_PAGE_SIZE); + + /* actually usually some more */ + if (size >= LARGE_PAGE_SIZE) { + printk("SMBIOS area too long %lu\n", size); + return NULL; + } + set_pmd(temp_mappings[0].pmd, __pmd(map | _KERNPG_TABLE | _PAGE_PSE)); + map += LARGE_PAGE_SIZE; + set_pmd(temp_mappings[1].pmd, __pmd(map | _KERNPG_TABLE | _PAGE_PSE)); + __flush_tlb(); + return temp_mappings[0].address + (addr & (LARGE_PAGE_SIZE-1)); } /* To avoid virtual aliases later */ __in...
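The mapping trick: reserve two adjacent pmd slots, point them at consecutive 2 MiB frames starting at the rounded-down physical address, and return the alias base plus the in-page offset, so a region crossing the first large-page boundary is still covered. The arithmetic, worked standalone (2 MiB PSE pages assumed; the address is an example, not from the patch):

    #include <stdio.h>

    #define LARGE_PAGE_SIZE (2UL << 20)  /* one 2 MiB PSE mapping */

    int main(void)
    {
            unsigned long addr = 0xf5ab4UL;                     /* e.g. an SMBIOS table */
            unsigned long map  = addr & ~(LARGE_PAGE_SIZE - 1); /* round_down() */
            unsigned long off  = addr & (LARGE_PAGE_SIZE - 1);

            /* pmd slot 0 covers [map, map + 2M), slot 1 the next 2 MiB */
            printf("map=%#lx next=%#lx offset=%#lx\n",
                   map, map + LARGE_PAGE_SIZE, off);
            return 0;
    }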