search for: pud_offset

Displaying results from an estimated 101 matches for "pud_offset".

2007 Apr 18
1
Is this a typo?
...*pgd)
{
	pud_t *pud;
	pmd_t *pmd_table;
#ifdef CONFIG_X86_PAE
	pmd_table = (pmd_t *) alloc_bootmem_low_pages(PAGE_SIZE);
	paravirt_alloc_pmd(__pa(page_table) >> PAGE_SHIFT);
	                        ^^^^^^^^^^ pmd_table?
	set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT));
	pud = pud_offset(pgd, 0);
	if (pmd_table != pmd_offset(pud, 0))
		BUG();
#else
	pud = pud_offset(pgd, 0);
	pmd_table = pmd_offset(pud, 0);
#endif
	return pmd_table;
}

Also, did you anticipate that these paravirt_alloc_* functions would be useful for Xen to maintain the "pagetable pages must be RO" co...
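The question, made concrete: the hook is presumably meant to be told about the pmd page that was just allocated, so the fix would look something like this (a sketch using only identifiers from the quoted hunk):

    /* Presumed fix: pass the freshly allocated pmd_table, not the
     * unrelated page_table symbol, to the allocation hook. */
    pmd_table = (pmd_t *) alloc_bootmem_low_pages(PAGE_SIZE);
    paravirt_alloc_pmd(__pa(pmd_table) >> PAGE_SHIFT);
    set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT));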
2008 May 15
0
[PATCH] linux/x86: utilize lookup_address() for virt_to_ptep()
...table(void *va, unsign
 void make_pages_readonly(void *va, unsigned int nr, unsigned int feature);
 void make_pages_writable(void *va, unsigned int nr, unsigned int feature);
-#define virt_to_ptep(__va)						\
-({									\
-	pgd_t *__pgd = pgd_offset_k((unsigned long)(__va));		\
-	pud_t *__pud = pud_offset(__pgd, (unsigned long)(__va));	\
-	pmd_t *__pmd = pmd_offset(__pud, (unsigned long)(__va));	\
-	pte_offset_kernel(__pmd, (unsigned long)(__va));		\
-})
-
-#define arbitrary_virt_to_machine(__va)					\
-({									\
-	maddr_t m = (maddr_t)pte_mfn(*virt_to_ptep(__va)) << PAGE_SHIFT;\
-	m | ((un...
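The patch title tells the story: the open-coded pgd/pud/pmd walk is being replaced by the existing walker in pageattr.c. A minimal sketch of the likely replacement, assuming the one-argument lookup_address() of the 2.6.18-era i386 tree (the actual replacement body is truncated out of the excerpt above):

    /* Hypothetical shape of the replacement: reuse the pageattr.c
     * walker instead of open-coding the descent. */
    #define virt_to_ptep(va) lookup_address((unsigned long)(va))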
2006 Mar 14
12
[RFC] VMI for Xen?
I'm sure everyone has seen the drop of VMI patches for Linux at this point, but just in case, the link is included below. I've read this version of the VMI spec and have made my way through most of the patches. While I wasn't really that impressed with the first spec wrt Xen, the second version seems to be much more palatable. Specifically, the code inlining and
2007 Apr 18
0
[PATCH 4/5] Add address translation
...okup_address(unsigned long address);
 /*
+ * Helper function that returns physical page for virtual address.
+ * This assumes the mapping is valid.
+ */
+#define virt_to_pfn(_address)						\
+({									\
+	unsigned long long __paddr;					\
+	pgd_t *pgd = pgd_offset_k(_address);				\
+	pud_t *pud = pud_offset(pgd, (_address));			\
+	pmd_t *pmd = pmd_offset(pud, (_address));			\
+	if (pmd_large(*pmd))						\
+		__paddr = (pmd_val(*pmd) & LARGE_PAGE_MASK) |		\
+			  ((_address) & ~LARGE_PAGE_MASK);		\
+	else {								\
+		pte_t *pte = pte_offset_kernel(pmd, (_address));	\
+		__paddr = (pte_val(*pte) &...
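The same walk written out as a function may be easier to follow; this is a sketch mirroring the quoted macro under i386's three-level layout, not the patch's literal code:

    /* Sketch: translate a (valid) kernel virtual address to a
     * physical address, handling large pages at the pmd level. */
    static unsigned long long sketch_virt_to_paddr(unsigned long address)
    {
        pgd_t *pgd = pgd_offset_k(address);
        pud_t *pud = pud_offset(pgd, address);
        pmd_t *pmd = pmd_offset(pud, address);

        if (pmd_large(*pmd))
            /* Large mapping: physical base from the pmd, offset
             * from the low bits of the address. */
            return (pmd_val(*pmd) & LARGE_PAGE_MASK) |
                   (address & ~LARGE_PAGE_MASK);

        /* 4K mapping: descend one more level to the pte. */
        return (pte_val(*pte_offset_kernel(pmd, address)) & PAGE_MASK) |
               (address & ~PAGE_MASK);
    }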
2005 Apr 15
0
[PATCH] Eliminate kernel version checks from i386/kernel/pci-dma.c
...04-15 11:11:09 -07:00
@@ -14,14 +14,7 @@
 #include <linux/version.h>
 #include <asm/io.h>
 #include <asm-xen/balloon.h>
-
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
 #include <asm/tlbflush.h>
-#else
-#define pte_offset_kernel pte_offset
-#define pud_t pgd_t
-#define pud_offset(d, va) d
-#endif

 struct dma_coherent_mem {
 	void *virt_base;
@@ -85,24 +78,13 @@ xen_contig_memory(unsigned long vstart,
 	balloon_unlock(flags);
 }

-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
-void *pci_alloc_consistent(struct pci_dev *hwdev, size_t size,
-			   dma_addr_t *dma_hand...
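The compatibility shim being deleted is worth a gloss: on the pre-pud kernels it targeted there is no pud level at all, so a pud "lookup" can simply hand back the pgd entry it was given. In isolation (the same lines the hunk removes, with comments added):

    /* 2.4-era compat: with the pud level folded into the pgd, the
     * four-level walk compiles unchanged if the "pud" is just the
     * pgd entry itself. */
    #define pud_t             pgd_t
    #define pud_offset(d, va) (d)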
2020 Apr 28
0
[PATCH v3 22/75] x86/boot/compressed/64: Add set_page_en/decrypted() helpers
...access won't be optimized away.
+	 */
+	target = (unsigned long *)address;
+	scratch = *target;
+	arch_cmpxchg(target, scratch, scratch);
+
+	/*
+	 * The page is mapped at least with PMD size - so skip checks and walk
+	 * directly to the PMD.
+	 */
+	p4dp = p4d_offset(pgdp, address);
+	pudp = pud_offset(p4dp, address);
+	pmdp = pmd_offset(pudp, address);
+
+	if (pmd_large(*pmdp))
+		ptep = split_large_pmd(info, pmdp, address);
+	else
+		ptep = pte_offset_kernel(pmdp, address);
+
+	if (!ptep)
+		return -ENOMEM;
+
+	/*
+	 * Changing encryption attributes of a page requires to flush it from
+	 * the...
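The arch_cmpxchg() at the top is the interesting idiom: a plain load could be optimized away, but an atomic read-modify-write cannot, so it reliably forces the boot stage's fault handling to map the page before the walk begins. In isolation (a sketch of the quoted lines with the reasoning spelled out):

    unsigned long *target = (unsigned long *)address;
    unsigned long scratch = *target;

    /* A no-op compare-and-swap (old value == new value) that the
     * compiler must still emit, guaranteeing a real memory access
     * to 'address'. */
    arch_cmpxchg(target, scratch, scratch);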
2008 Jun 19
0
[PATCH] ia64/xen: implement the arch specific part of xencomm.
...dresses. */
+	if (vaddr >= KERNEL_START
+	    && vaddr < (KERNEL_START + KERNEL_TR_PAGE_SIZE))
+		return vaddr - kernel_virtual_offset;
+
+	/* In kernel area -- virtually mapped. */
+	pgd = pgd_offset_k(vaddr);
+	if (pgd_none(*pgd) || pgd_bad(*pgd))
+		return ~0UL;
+
+	pud = pud_offset(pgd, vaddr);
+	if (pud_none(*pud) || pud_bad(*pud))
+		return ~0UL;
+
+	pmd = pmd_offset(pud, vaddr);
+	if (pmd_none(*pmd) || pmd_bad(*pmd))
+		return ~0UL;
+
+	ptep = pte_offset_kernel(pmd, vaddr);
+	if (!ptep)
+		return ~0UL;
+
+	return (pte_val(*ptep) & _PFN_MASK) | (vaddr & ~PA...
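A caller-side sketch, assuming the function in this hunk is the xencomm virtual-to-physical helper (its name is not visible in the excerpt, so xencomm_vtop is a guess):

    /* Hypothetical caller: translate a buffer address before handing
     * it to the hypervisor; ~0UL signals an unmapped address. */
    unsigned long paddr = xencomm_vtop(vaddr);

    if (paddr == ~0UL)
        return -EINVAL;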
2007 Apr 18
0
[PATCH 1/5] Paravirt page alloc.patch
...nit.c
+++ b/arch/i386/mm/init.c
@@ -62,6 +62,7 @@ static pmd_t * __init one_md_table_init(
 #ifdef CONFIG_X86_PAE
 	pmd_table = (pmd_t *) alloc_bootmem_low_pages(PAGE_SIZE);
+	paravirt_alloc_pd(__pa(pmd_table) >> PAGE_SHIFT);
 	set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT));
 	pud = pud_offset(pgd, 0);
 	if (pmd_table != pmd_offset(pud, 0))
@@ -82,6 +83,7 @@ static pte_t * __init one_page_table_ini
 {
 	if (pmd_none(*pmd)) {
 		pte_t *page_table = (pte_t *) alloc_bootmem_low_pages(PAGE_SIZE);
+		paravirt_alloc_pt(__pa(page_table) >> PAGE_SHIFT);
 		set_pmd(pmd, __pmd(__pa(page_tab...
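On bare hardware these notifications should cost nothing; a minimal sketch of the native backend (hook names follow the hunk, the empty bodies are an assumption):

    /* Native backend: the hooks exist only so a hypervisor (Xen,
     * VMI) can intercept page-table page allocation, e.g. to make
     * the pages read-only. */
    static void native_alloc_pt(unsigned long pfn) { }
    static void native_alloc_pd(unsigned long pfn) { }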
2007 Apr 18
0
[PATCH 1/6] Page allocation hooks for VMI backend
...nit.c
+++ b/arch/i386/mm/init.c
@@ -62,6 +62,7 @@ static pmd_t * __init one_md_table_init(
 #ifdef CONFIG_X86_PAE
 	pmd_table = (pmd_t *) alloc_bootmem_low_pages(PAGE_SIZE);
+	paravirt_alloc_pd(__pa(pmd_table) >> PAGE_SHIFT);
 	set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT));
 	pud = pud_offset(pgd, 0);
 	if (pmd_table != pmd_offset(pud, 0))
@@ -82,6 +83,7 @@ static pte_t * __init one_page_table_ini
 {
 	if (pmd_none(*pmd)) {
 		pte_t *page_table = (pte_t *) alloc_bootmem_low_pages(PAGE_SIZE);
+		paravirt_alloc_pt(__pa(page_table) >> PAGE_SHIFT);
 		set_pmd(pmd, __pmd(__pa(page_tab...
2007 Apr 18
2
pgd_alloc and [cd]tors
Is there any real use in having a ctor/dtor for the pgd cache? Given that all pgd allocation happens via pgd_alloc/pgd_free, why not just fold the [cd]tor in? I'm asking because Xen wants pgd[3] to be unshared in the PAE case, and it looks to me like the easiest way to handle that is by making pgd_alloc/free pv-ops and doing the appropriate thing in the Xen code. Would need to sort out the
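A minimal sketch of the direction being proposed, assuming a paravirt-ops style table of function pointers (the structure and names here are hypothetical):

    /* Hypothetical pv-ops slots: folding the ctor/dtor into
     * allocation lets a Xen backend keep pgd[3] unshared for PAE. */
    struct pv_pgd_ops {
        pgd_t *(*pgd_alloc)(struct mm_struct *mm);
        void   (*pgd_free)(pgd_t *pgd);
    };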
2006 Oct 18
21
[PATCH][RESEND] PV drivers for HVM guests
I am enclosing the patch I have been working on. I have tested this on sles9 (2.6.5) and RHEL 4 (2.6.9). The patch applies cleanly on ChangeSet 11635. This patch was cleaned up based on Ian's and DOI's work. Signed-off-by: K. Y. Srinivasan (ksrinivasan@novell.com)
2007 Apr 18
1
[RFC/PATCH LGUEST X86_64 01/13] HV VM Fix map area for HV.
...t));
+}
+
+static inline void hvvm_pmd_unmap(pud_t *pud, unsigned long addr)
+{
+	pmd_t *pmd;
+
+	pmd = pmd_offset(pud, addr);
+	if (pmd_none_or_clear_bad(pmd))
+		return;
+	hvvm_pte_unmap(pmd, addr);
+}
+
+static inline void hvvm_pud_unmap(pgd_t *pgd, unsigned long addr)
+{
+	pud_t *pud;
+
+	pud = pud_offset(pgd, addr);
+	if (pud_none_or_clear_bad(pud))
+		return;
+	hvvm_pmd_unmap(pud, addr);
+}
+
+static void hvvm_unmap_page(unsigned long addr)
+{
+	pgd_t *pgd;
+
+	pgd = pgd_offset_k(addr);
+	hvvm_pud_unmap(pgd, addr);
+}
+
+static int hvvm_pte_alloc(pmd_t *pmd, unsigned long addr,
+			  unsigned long...
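The leaf of the unmap chain is not in the excerpt; hvvm_pte_unmap() presumably locates and clears the pte, along these lines (the body is an assumption):

    /* Hypothetical leaf of the chain: find the pte for addr and
     * clear it in the kernel page table. */
    static inline void hvvm_pte_unmap(pmd_t *pmd, unsigned long addr)
    {
        pte_t *pte = pte_offset_kernel(pmd, addr);

        pte_clear(&init_mm, addr, pte);
    }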
2007 Nov 09
11
[PATCH 0/24] paravirt_ops for unified x86 - that's me again!
Hey folks, Here's a new spin of the pvops64 patch series. We didn't get that many comments from the last time, so it should probably be almost ready to get in. Heya! From the last version, the most notable changes are:
* consolidation of system.h, merging jeremy's comments about ordering concerns
* consolidation of smp functions that goes through smp_ops. They're sharing