search for: pmd_cach

Displaying 20 results from an estimated 38 matches for "pmd_cach".

2007 Apr 18
1
[PATCH 1/5] Add pagetable allocation notifiers
...d long flags; /* can be called from interrupt context */
+    ClearPagePDE(virt_to_page(pgd));
     spin_lock_irqsave(&pgd_lock, flags);
     pgd_list_del(pgd);
     spin_unlock_irqrestore(&pgd_lock, flags);
@@ -244,13 +246,16 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
         pmd_t *pmd = kmem_cache_alloc(pmd_cache, GFP_KERNEL);
         if (!pmd)
             goto out_oom;
+        SetPagePDE(virt_to_page(pmd));
         set_pgd(&pgd[i], __pgd(1 + __pa(pmd)));
     }
     return pgd;
 out_oom:
-    for (i--; i >= 0; i--)
+    for (i--; i >= 0; i--) {
+        ClearPagePDE(pfn_to_page(pgd_val(pgd[i]) >> PAGE_SHIFT));
         kmem_cache_fre...
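The pattern in the excerpt above is the whole point of the series: flag a page when it becomes a page directory and unflag it just before it is freed, so lower layers can tell pagetable pages apart from ordinary memory (SetPagePDE/ClearPagePDE come from the patch, not mainline). A minimal userspace sketch of that pair-at-the-boundaries discipline, with hypothetical track/untrack helpers standing in for the page flags:

    /* Sketch of the allocate-flag / clear-before-free pairing from the
     * patch, reduced to userspace C. track_pde()/untrack_pde() are
     * hypothetical stand-ins for SetPagePDE/ClearPagePDE. */
    #include <stdlib.h>
    #include <stdio.h>

    struct page { int is_pde; };              /* one bit of per-page state */

    static void track_pde(struct page *pg)   { pg->is_pde = 1; }
    static void untrack_pde(struct page *pg) { pg->is_pde = 0; }

    static struct page *alloc_pmd_page(void)
    {
        struct page *pg = calloc(1, sizeof(*pg));
        if (pg)
            track_pde(pg);                    /* flag before it goes live */
        return pg;
    }

    static void free_pmd_page(struct page *pg)
    {
        untrack_pde(pg);                      /* unflag before release */
        free(pg);
    }

    int main(void)
    {
        struct page *pmd = alloc_pmd_page();
        printf("flagged as pde: %d\n", pmd ? pmd->is_pde : -1);
        free_pmd_page(pmd);
        return 0;
    }

The OOM unwind path is where this discipline bites: every pmd that was flagged must be unflagged before kmem_cache_free, which is exactly what the extra ClearPagePDE in the patch's error loop does.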
2007 Apr 18
0
[PATCH 2/2] Use page present for pae pdpes
...goto out_oom;
         SetPagePDE(virt_to_page(pmd));
-        set_pgd(&pgd[i], __pgd(1 + __pa(pmd)));
+        set_pgd(&pgd[i], __pgd(_PAGE_PRESENT | __pa(pmd)));
     }
     return pgd;
 out_oom:
     for (i--; i >= 0; i--) {
         ClearPagePDE(pfn_to_page(pgd_val(pgd[i]) >> PAGE_SHIFT));
-        kmem_cache_free(pmd_cache, (void *)__va(pgd_val(pgd[i])-1));
+        kmem_cache_free(pmd_cache, (void *)__va(pgd_val(pgd[i]) & PAGE_MASK));
     }
     kmem_cache_free(pgd_cache, pgd);
     return NULL;
@@ -268,7 +268,7 @@ void pgd_free(pgd_t *pgd)
     if (PTRS_PER_PMD > 1)
         for (i = 0; i < USER_PTRS_PER_PGD; ++i) {
             ClearP...
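The second hunk explains the first: once the low bits of a directory entry hold _PAGE_PRESENT rather than a literal 1, decoding the pmd pointer by subtracting 1 only works by accident, while masking with PAGE_MASK strips any combination of flag bits. A standalone illustration with made-up values (SOME_FLAG stands in for any additional low bit a future change might set):

    /* Plain userspace C, hypothetical values: why "& PAGE_MASK" is robust
     * where "- 1" only works while bit 0 is the sole low bit set. */
    #include <stdio.h>
    #include <stdint.h>

    #define PAGE_SHIFT    12
    #define PAGE_MASK     (~((uint64_t)(1 << PAGE_SHIFT) - 1))
    #define _PAGE_PRESENT 0x001ULL
    #define SOME_FLAG     0x020ULL   /* any extra flag bit */

    int main(void)
    {
        uint64_t pmd_phys = 0x12345000ULL;   /* page-aligned pmd address */
        uint64_t entry    = pmd_phys | _PAGE_PRESENT | SOME_FLAG;

        printf("entry - 1         = %#llx (wrong once extra bits appear)\n",
               (unsigned long long)(entry - 1));
        printf("entry & PAGE_MASK = %#llx (always the page address)\n",
               (unsigned long long)(entry & PAGE_MASK));
        return 0;
    }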
2007 Apr 18
0
[PATCH 1/5] Paravirt page alloc.patch
...n be called from interrupt context */
+    paravirt_release_pd(__pa(pgd) >> PAGE_SHIFT);
     spin_lock_irqsave(&pgd_lock, flags);
     pgd_list_del(pgd);
     spin_unlock_irqrestore(&pgd_lock, flags);
@@ -274,13 +281,18 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
         pmd_t *pmd = kmem_cache_alloc(pmd_cache, GFP_KERNEL);
         if (!pmd)
             goto out_oom;
+        paravirt_alloc_pd(__pa(pmd) >> PAGE_SHIFT);
         set_pgd(&pgd[i], __pgd(1 + __pa(pmd)));
     }
     return pgd;
 out_oom:
-    for (i--; i >= 0; i--)
-        kmem_cache_free(pmd_cache, (void *)__va(pgd_val(pgd[i])-1));
+    for (i--; i >= 0; i--) {
+...
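paravirt_alloc_pd/paravirt_release_pd follow the standard paravirt_ops shape: an indirect call that a native kernel points at a no-op and a hypervisor backend overrides, here to learn when a page becomes (or stops being) a page directory. A simplified model of that indirection (the struct and names below are illustrative, not the real paravirt_ops layout):

    /* Simplified model of a paravirt hook pair: native does nothing,
     * a hypervisor backend swaps in real work. */
    #include <stdio.h>

    struct pv_mmu_hooks {
        void (*alloc_pd)(unsigned long pfn);
        void (*release_pd)(unsigned long pfn);
    };

    static void native_nop(unsigned long pfn) { (void)pfn; }   /* bare metal */

    static void hv_alloc_pd(unsigned long pfn)
    {
        printf("hypervisor: pin page directory at pfn %lu\n", pfn);
    }

    static void hv_release_pd(unsigned long pfn)
    {
        printf("hypervisor: unpin page directory at pfn %lu\n", pfn);
    }

    static struct pv_mmu_hooks pv_ops = { native_nop, native_nop };

    int main(void)
    {
        pv_ops.alloc_pd(42);      /* native: silent no-op */
        pv_ops = (struct pv_mmu_hooks){ hv_alloc_pd, hv_release_pd };
        pv_ops.alloc_pd(42);      /* backend is now notified */
        pv_ops.release_pd(42);
        return 0;
    }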
2007 Apr 18
0
[PATCH 1/6] Page allocation hooks for VMI backend
...n be called from interrupt context */
+    paravirt_release_pd(__pa(pgd) >> PAGE_SHIFT);
     spin_lock_irqsave(&pgd_lock, flags);
     pgd_list_del(pgd);
     spin_unlock_irqrestore(&pgd_lock, flags);
@@ -274,13 +281,18 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
         pmd_t *pmd = kmem_cache_alloc(pmd_cache, GFP_KERNEL);
         if (!pmd)
             goto out_oom;
+        paravirt_alloc_pd(__pa(pmd) >> PAGE_SHIFT);
         set_pgd(&pgd[i], __pgd(1 + __pa(pmd)));
     }
     return pgd;
 out_oom:
-    for (i--; i >= 0; i--)
-        kmem_cache_free(pmd_cache, (void *)__va(pgd_val(pgd[i])-1));
+    for (i--; i >= 0; i--) {
+...
2007 Apr 18
2
pgd_alloc and [cd]tors
Is there any real use in having a ctor/dtor for the pgd cache? Given that all pgd allocation happens via pgd_alloc/pgd_free, why not just fold the [cd]tor in? I'm asking because Xen wants pgd[3] to be unshared in the PAE case, and it looks to me like the easiest way to handle that is by making pgd_alloc/free pv-ops and doing the appropriate thing in the Xen code. Would need to sort out the...
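For context, the ctor in question pre-populates the kernel portion of each new pgd, which is exactly the part Xen wants to diverge on under PAE. A rough sketch of the folding being proposed, with entirely hypothetical names and a userspace stand-in for the allocator:

    /* Rough sketch of "fold the ctor into pgd_alloc and make it a pv
     * hook". pgd_ctor_native/pgd_ctor_xen and the hook variable are
     * hypothetical, not kernel code. */
    #include <stdlib.h>
    #include <string.h>

    #define PTRS_PER_PGD 4
    typedef unsigned long pgd_t;

    static pgd_t kernel_template[PTRS_PER_PGD];   /* shared kernel mappings */

    static void pgd_ctor_native(pgd_t *pgd)
    {
        /* native: every pgd shares the kernel's entries verbatim */
        memcpy(pgd, kernel_template, sizeof(kernel_template));
    }

    static void pgd_ctor_xen(pgd_t *pgd)
    {
        memcpy(pgd, kernel_template, sizeof(kernel_template));
        pgd[3] = 0;   /* Xen/PAE: slot 3 unshared; install a private pmd here */
    }

    /* the pv hook the thread suggests: bound once at boot */
    static void (*pgd_ctor)(pgd_t *) = pgd_ctor_native;

    static pgd_t *pgd_alloc(void)
    {
        pgd_t *pgd = calloc(PTRS_PER_PGD, sizeof(pgd_t));
        if (pgd)
            pgd_ctor(pgd);   /* ctor folded into the allocator, no slab ctor */
        return pgd;
    }

    int main(void)
    {
        pgd_ctor = pgd_ctor_xen;   /* a Xen boot would select this */
        pgd_t *pgd = pgd_alloc();
        free(pgd);
        return 0;
    }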
2007 Apr 18
1
[RFC, PATCH 19/24] i386 Vmi mmu changes
...*/
+    mach_setup_pgd(__pa(pgd) >> PAGE_SHIFT,
+                   __pa(swapper_pg_dir) >> PAGE_SHIFT,
+                   USER_PTRS_PER_PGD,
+                   PTRS_PER_PGD - USER_PTRS_PER_PGD);
     for (i = 0; i < USER_PTRS_PER_PGD; ++i) {
         pmd_t *pmd = kmem_cache_alloc(pmd_cache, GFP_KERNEL);
         if (!pmd)
             goto out_oom;
+        mach_setup_pmd(__pa(pmd) >> PAGE_SHIFT);
         set_pgd(&pgd[i], __pgd(1 + __pa(pmd)));
     }
     return pgd;
 out_oom:
-    for (i--; i >= 0; i--)
+    for (i--; i >= 0; i--) {
+        mach_release_pmd(pgd_val(pgd[i]) >> PAGE_SHIFT);
         kmem_ca...
2007 Apr 18
17
[patch 00/17] paravirt_ops updates
Hi Andi, This series of patches updates paravirt_ops in various ways. Some of the changes are plain cleanups and improvements, and some add some interfaces necessary for Xen. The brief overview:
add-MAINTAINERS.patch - obvious
remove-CONFIG_DEBUG_PARAVIRT.patch - no longer needed
paravirt-nop.patch - mark nop operations consistently
paravirt-pte-accessors.patch - operations to pack/unpack...
2007 Oct 12
10
[PATCH 00/10] REVIEW: Xen patches for 2.6.24
This is my current set of updates to Xen for 2.6.24. This is largely a bugfix set, and a couple of them are also relevant to 2.6.23. These are in the pre-x86 merge form; I'll update them once the merge goes into git. Quick overview:
- remove some dead code in arch/i386/mm/init.c
- clean up some duplicate includes
- when sending an IPI, yield the vcpu if the destination doesn't have...
2007 Apr 18
23
[patch 00/20] paravirt_ops updates
Hi Andi, Here's a repost of the paravirt_ops update series I posted the other day. Since then, I found a few potential bugs with patching clobbering, and cleaned up and documented paravirt.h and the patching machinery. Overview:
add-MAINTAINERS.patch
    obvious
remove-CONFIG_DEBUG_PARAVIRT.patch
    No longer meaningful or needed.
paravirt-nop.patch
    Clean up nop paravirt_ops functions, mainly to...
2006 Mar 14
12
[RFC] VMI for Xen?
I'm sure everyone has seen the drop of VMI patches for Linux at this point, but just in case, the link is included below. I've read this version of the VMI spec and have made my way through most of the patches. While I wasn't really that impressed with the first spec wrt Xen, the second version seems to be much more palatable. Specifically, the code inlining and...