search for: __pgd

Displaying 20 results from an estimated 115 matches for "__pgd".

2012 Nov 16
1
[PATCH v4] x86/xen: Use __pa_symbol instead of __pa on C visible symbols
...(+), 10 deletions(-) diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c index 4a05b39..a63e5f9 100644 --- a/arch/x86/xen/mmu.c +++ b/arch/x86/xen/mmu.c @@ -1486,7 +1486,8 @@ static int xen_pgd_alloc(struct mm_struct *mm) if (user_pgd != NULL) { user_pgd[pgd_index(VSYSCALL_START)] = - __pgd(__pa(level3_user_vsyscall) | _PAGE_TABLE); + __pgd(__pa_symbol(level3_user_vsyscall) | + _PAGE_TABLE); ret = 0; } @@ -1958,10 +1959,10 @@ void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn) * pgd. */ if (xen_feature(XENFEAT_writable_page_tables)) {...
2007 Apr 18
0
[PATCH 2/2] Use page present for pae pdpes
...ux-2.6.13.orig/arch/i386/mm/pgtable.c 2005-08-31 14:48:17.000000000 -0700 +++ linux-2.6.13/arch/i386/mm/pgtable.c 2005-08-31 14:48:53.000000000 -0700 @@ -247,14 +247,14 @@ pgd_t *pgd_alloc(struct mm_struct *mm) if (!pmd) goto out_oom; SetPagePDE(virt_to_page(pmd)); - set_pgd(&pgd[i], __pgd(1 + __pa(pmd))); + set_pgd(&pgd[i], __pgd(_PAGE_PRESENT | __pa(pmd))); } return pgd; out_oom: for (i--; i >= 0; i--) { ClearPagePDE(pfn_to_page(pgd_val(pgd[i]) >> PAGE_SHIFT)); - kmem_cache_free(pmd_cache, (void *)__va(pgd_val(pgd[i])-1)); + kmem_cache_free(pmd_cache, (v...
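
The hunk above replaces the bare constant 1 with _PAGE_PRESENT when building the PGD entry. On x86 the present bit is bit 0 (0x001), and __pa(pmd) is page-aligned, so the encoded value is unchanged and only the readability improves. A minimal standalone sketch of that equivalence (the flag value is hard-coded here as an assumption rather than taken from the kernel headers):

#include <assert.h>
#include <stdint.h>

/* Assumption: _PAGE_PRESENT is bit 0 (0x001), as on x86. */
#define _PAGE_PRESENT 0x001UL

int main(void)
{
	/* Stand-in for __pa(pmd): any page-aligned physical address. */
	uint64_t pmd_phys = 0x12345000UL;

	uint64_t old_entry = 1 + pmd_phys;             /* old form: magic "1" */
	uint64_t new_entry = _PAGE_PRESENT | pmd_phys; /* new form: named flag */

	/* For a page-aligned address the two encodings are identical. */
	assert(old_entry == new_entry);
	return 0;
}
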
2008 May 15
0
[PATCH] linux/x86: utilize lookup_address() for virt_to_ptep()
...13:38:59.000000000 +0200 @@ -477,19 +477,16 @@ void make_page_writable(void *va, unsign void make_pages_readonly(void *va, unsigned int nr, unsigned int feature); void make_pages_writable(void *va, unsigned int nr, unsigned int feature); -#define virt_to_ptep(__va) \ -({ \ - pgd_t *__pgd = pgd_offset_k((unsigned long)(__va)); \ - pud_t *__pud = pud_offset(__pgd, (unsigned long)(__va)); \ - pmd_t *__pmd = pmd_offset(__pud, (unsigned long)(__va)); \ - pte_offset_kernel(__pmd, (unsigned long)(__va)); \ -}) - -#define arbitrary_virt_to_machine(__va) \ -({ \ - maddr_t m =...
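
The macro being removed above open-codes a software page-table walk for a kernel virtual address. Roughly, it is equivalent to the kernel-context sketch below (error handling and large-page entries are ignored, which is part of why the patch switches callers to lookup_address()):

/* Kernel-context sketch of what virt_to_ptep() open-coded: a
 * pgd -> pud -> pmd -> pte walk of the kernel page tables.
 * Illustrative only; lookup_address() also copes with missing
 * levels and large pages. */
static pte_t *virt_to_ptep_sketch(unsigned long va)
{
	pgd_t *pgd = pgd_offset_k(va);      /* top level, kernel tables */
	pud_t *pud = pud_offset(pgd, va);   /* second level */
	pmd_t *pmd = pmd_offset(pud, va);   /* third level */

	return pte_offset_kernel(pmd, va);  /* leaf entry */
}
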
2007 Apr 18
0
[PATCH 1/5] Paravirt page alloc.patch
...============================ --- a/arch/i386/mm/init.c +++ b/arch/i386/mm/init.c @@ -62,6 +62,7 @@ static pmd_t * __init one_md_table_init( #ifdef CONFIG_X86_PAE pmd_table = (pmd_t *) alloc_bootmem_low_pages(PAGE_SIZE); + paravirt_alloc_pd(__pa(pmd_table) >> PAGE_SHIFT); set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT)); pud = pud_offset(pgd, 0); if (pmd_table != pmd_offset(pud, 0)) @@ -82,6 +83,7 @@ static pte_t * __init one_page_table_ini { if (pmd_none(*pmd)) { pte_t *page_table = (pte_t *) alloc_bootmem_low_pages(PAGE_SIZE); + paravirt_alloc_pt(__pa(page_table) >...
2009 Jun 05
1
[PATCH] lguest: PAE support
...not present? We can't map it in. */ @@ -228,12 +309,40 @@ bool demand_page(struct lg_cpu *cpu, unsigned long vaddr, int errcode) check_gpgd(cpu, gpgd); /* And we copy the flags to the shadow PGD entry. The page * number in the shadow PGD is the page we just allocated. */ - *spgd = __pgd(__pa(ptepage) | pgd_flags(gpgd)); + set_pgd(spgd, __pgd(__pa(ptepage) | pgd_flags(gpgd))); } +#ifdef CONFIG_X86_PAE + gpmd = lgread(cpu, gpmd_addr(gpgd, vaddr), pmd_t); + /* middle level not present? We can't map it in. */ + if (!(pmd_flags(gpmd) & _PAGE_PRESENT)) + return false; + +...
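
The hunk above replaces a direct store to the shadow PGD with set_pgd(), so the write goes through the proper accessor in the PAE case. For context, a hedged sketch of the step it touches (names follow the lguest excerpt; this is illustrative, not the complete demand_page()):

/* The guest's PGD entry (gpgd) supplies the flag bits, the host supplies a
 * freshly allocated page for the shadow PTEs, and set_pgd() installs the
 * combined entry into the shadow PGD slot (spgd). */
if (!(pgd_flags(*spgd) & _PAGE_PRESENT)) {
	unsigned long ptepage = get_zeroed_page(GFP_KERNEL);
	if (!ptepage)
		return false;                   /* out of memory */
	check_gpgd(cpu, gpgd);                  /* sanity-check the guest entry */
	set_pgd(spgd, __pgd(__pa(ptepage) | pgd_flags(gpgd)));
}
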
2007 Apr 18
0
[PATCH 1/6] Page allocation hooks for VMI backend
...============================ --- a/arch/i386/mm/init.c +++ b/arch/i386/mm/init.c @@ -62,6 +62,7 @@ static pmd_t * __init one_md_table_init( #ifdef CONFIG_X86_PAE pmd_table = (pmd_t *) alloc_bootmem_low_pages(PAGE_SIZE); + paravirt_alloc_pd(__pa(pmd_table) >> PAGE_SHIFT); set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT)); pud = pud_offset(pgd, 0); if (pmd_table != pmd_offset(pud, 0)) @@ -82,6 +83,7 @@ static pte_t * __init one_page_table_ini { if (pmd_none(*pmd)) { pte_t *page_table = (pte_t *) alloc_bootmem_low_pages(PAGE_SIZE); + paravirt_alloc_pt(__pa(page_table) >...
2020 Aug 10
1
[PATCH v3 4/7] x86/paravirt: remove 32-bit support from PARAVIRT_XXL
...now simply return (pte_t) ret? > > I don't think so, but I can turn it into > > return native_make_pte(PVOP_CALLEE1(...)); I thought that since now this is only built for 64-bit we don't have to worry about different pte_t definitions and can do what we do, for example, for __pgd()? -boris
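
The pattern referred to here is the 64-bit paravirt wrapper shape: pass the raw entry value through the pv hook, then rebuild the typed entry with native_make_pgd()/native_make_pte() instead of casting. A standalone sketch of that shape (the pv hooks below are hypothetical stand-ins; the real code goes through PVOP_CALLEE1(), whose exact form varies between kernel versions):

#include <stdint.h>

/* Simplified stand-ins for the kernel's typed page-table entries. */
typedef uint64_t pgdval_t;
typedef uint64_t pteval_t;
typedef struct { pgdval_t pgd; } pgd_t;
typedef struct { pteval_t pte; } pte_t;

static inline pgd_t native_make_pgd(pgdval_t val) { return (pgd_t){ val }; }
static inline pte_t native_make_pte(pteval_t val) { return (pte_t){ val }; }

/* Hypothetical stand-ins for the pv hook (PVOP_CALLEE1 in the real code). */
static inline pgdval_t pv_make_pgd(pgdval_t val) { return val; }
static inline pteval_t pv_make_pte(pteval_t val) { return val; }

/* The wrapper pattern under discussion: hook first, then wrap the raw
 * value back into the typed entry rather than casting. */
static inline pgd_t make_pgd(pgdval_t val) { return native_make_pgd(pv_make_pgd(val)); }
static inline pte_t make_pte(pteval_t val) { return native_make_pte(pv_make_pte(val)); }
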
2007 Apr 18
2
pgd_alloc and [cd]tors
Is there any real use in having a ctor/dtor for the pgd cache? Given that all pgd allocation happens via pgd_alloc/pgd_free, why not just fold the [cd]tor in? I'm asking because Xen wants pgd[3] to be unshared in the PAE case, and it looks to me like the easiest way to handle that is by making pgd_alloc/free pv-ops and doing the appropriate thing in the Xen code. Would need to sort out the
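
For illustration, a sketch of the direction the post suggests: routing pgd_alloc/pgd_free through pv-ops so Xen can keep pgd[3] unshared on PAE. All names below are hypothetical; this is not the actual kernel interface:

struct mm_struct;                              /* opaque for this sketch */
typedef struct { unsigned long pgd; } pgd_t;   /* simplified stand-in */

/* Hypothetical pv-op table for PGD lifetime; filled in at boot with either
 * the native or the Xen implementation. */
struct pv_pgd_ops {
	pgd_t *(*pgd_alloc)(struct mm_struct *mm);
	void   (*pgd_free)(struct mm_struct *mm, pgd_t *pgd);
};

extern struct pv_pgd_ops pv_pgd_ops;

static inline pgd_t *pgd_alloc(struct mm_struct *mm)
{
	return pv_pgd_ops.pgd_alloc(mm);       /* ctor work folded in here */
}

static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
	pv_pgd_ops.pgd_free(mm, pgd);          /* dtor work folded in here */
}
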
2009 Sep 21
1
[PATCH 2/5] lguest: use set_pte/set_pmd uniformly for real page table entries
...USER)); + pmd = pfn_pmd(((unsigned long)&linear[i] - mem_base)/PAGE_SIZE, + __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER)); if (copy_to_user(&pmds[j], &pmd, sizeof(pmd)) != 0) return -EFAULT; } /* One PGD entry, pointing to that PMD page. */ - set_pgd(&pgd, __pgd(((u32)pmds - mem_base) | _PAGE_PRESENT)); + pgd = __pgd(((unsigned long)pmds - mem_base) | _PAGE_PRESENT); /* Copy it in as the first PGD entry (ie. addresses 0-1G). */ if (copy_to_user(&pgdir[0], &pgd, sizeof(pgd)) != 0) return -EFAULT; @@ -1141,15 +1140,13 @@ void map_switcher_in_g...
2007 Apr 18
0
[RFC/PATCH PV_OPS X86_64 08/17] paravirt_ops - memory managment
...ravirt.h> +#else + #define pte_val(x) ((x).pte) #define pmd_val(x) ((x).pmd) #define pud_val(x) ((x).pud) #define pgd_val(x) ((x).pgd) -#define pgprot_val(x) ((x).pgprot) #define __pte(x) ((pte_t) { (x) } ) #define __pmd(x) ((pmd_t) { (x) } ) #define __pud(x) ((pud_t) { (x) } ) #define __pgd(x) ((pgd_t) { (x) } ) +#endif /* CONFIG_PARAVIRT */ + +#define pgprot_val(x) ((x).pgprot) #define __pgprot(x) ((pgprot_t) { (x) } ) #define __PHYSICAL_START ((unsigned long)CONFIG_PHYSICAL_START) Index: clean-start/include/asm-x86_64/pgtable.h ===================================================...
2007 Apr 18
1
[PATCH 1/5] Add pagetable allocation notifiers
...(&pgd_lock, flags); pgd_list_del(pgd); spin_unlock_irqrestore(&pgd_lock, flags); @@ -244,13 +246,16 @@ pgd_t *pgd_alloc(struct mm_struct *mm) pmd_t *pmd = kmem_cache_alloc(pmd_cache, GFP_KERNEL); if (!pmd) goto out_oom; + SetPagePDE(virt_to_page(pmd)); set_pgd(&pgd[i], __pgd(1 + __pa(pmd))); } return pgd; out_oom: - for (i--; i >= 0; i--) + for (i--; i >= 0; i--) { + ClearPagePDE(pfn_to_page(pgd_val(pgd[i]) >> PAGE_SHIFT)); kmem_cache_free(pmd_cache, (void *)__va(pgd_val(pgd[i])-1)); + } kmem_cache_free(pgd_cache, pgd); return NULL; } @@ -26...