search for: _page_present

Displaying 20 results from an estimated 147 matches for "_page_present".

2008 Jan 18
0
[PATCH] minios: support COW for a zero page
...18,6 +118,46 @@ void page_walk(unsigned long virt_addres
 }
+static int handle_cow(unsigned long addr)
+{
+        pgentry_t *tab = (pgentry_t *)start_info.pt_base, page;
+        unsigned long new_page;
+        int rc;
+
+#if defined(__x86_64__)
+        page = tab[l4_table_offset(addr)];
+        if (!(page & _PAGE_PRESENT))
+                return 0;
+        tab = pte_to_virt(page);
+#endif
+#if defined(__x86_64__) || defined(CONFIG_X86_PAE)
+        page = tab[l3_table_offset(addr)];
+        if (!(page & _PAGE_PRESENT))
+                return 0;
+        tab = pte_to_virt(page);
+#endif
+        page = tab[l2_table_offset(addr)];
+        if (...
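The excerpt shows the standard top-down walk: read one entry per level, stop as soon as _PAGE_PRESENT is clear, otherwise follow the entry down. A self-contained toy version of the same check-then-descend pattern (two levels, made-up bit layout; Mini-OS's real walk goes through pte_to_virt() on actual guest page tables):

    #include <stdint.h>
    #include <stdio.h>

    #define PRESENT   0x1ull
    #define ADDR_MASK (~0xfffull)

    typedef uint64_t pgentry_t;

    static _Alignas(4096) pgentry_t l1[512];   /* leaf level */
    static _Alignas(4096) pgentry_t l2[512];   /* points at l1 */

    /* Follow an entry's address bits to the next-level table. */
    static pgentry_t *entry_to_table(pgentry_t e)
    {
        return (pgentry_t *)(uintptr_t)(e & ADDR_MASK);
    }

    /* Return the leaf entry for (i2, i1), or 0 if any level is absent. */
    static pgentry_t walk(unsigned i2, unsigned i1)
    {
        pgentry_t e = l2[i2];
        if (!(e & PRESENT))
            return 0;                    /* hole at the directory level */
        e = entry_to_table(e)[i1];
        return (e & PRESENT) ? e : 0;    /* hole at the leaf level */
    }

    int main(void)
    {
        l1[3] = 0xabc000ull | PRESENT;
        l2[0] = (pgentry_t)(uintptr_t)l1 | PRESENT;
        printf("mapped:   %#llx\n", (unsigned long long)walk(0, 3));
        printf("unmapped: %#llx\n", (unsigned long long)walk(1, 3));
        return 0;
    }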
2009 Apr 16
1
NULL pointer dereference at __switch_to() ( __unlazy_fpu ) with lguest PAE patch
...r addresses. */
+        if (pgd_index(vaddr) == SWITCHER_PGD_INDEX &&
+            index >= SWITCHER_PMD_INDEX) {
+                kill_guest(cpu, "attempt to access switcher pages");
+                index = 0;
+        }
+
+        /* You should never call this if the PGD entry wasn't valid */
+        BUG_ON(!(pgd_flags(spgd) & _PAGE_PRESENT));
+
+        page = __va(pgd_pfn(spgd) << PAGE_SHIFT);
+        return &page[index];
+}
+#endif
+
 /* This routine then takes the page directory entry returned above, which
  * contains the address of the page table entry (PTE) page.  It then returns a
  * pointer to the PTE entry for the given addres...
2009 Jun 05
1
[PATCH] lguest: PAE support
...#define SWITCHER_PGD_INDEX (PTRS_PER_PGD - 1)
+/* For PAE we need the PMD index as well. We use the last 2MB, so we
+ * will need the last pmd entry of the last pmd page. */
+#ifdef CONFIG_X86_PAE
+#define SWITCHER_PMD_INDEX      (PTRS_PER_PMD - 1)
+#define RESERVE_MEM             2U
+#define CHECK_GPGD_MASK         _PAGE_PRESENT
+#else
+#define RESERVE_MEM             4U
+#define CHECK_GPGD_MASK         _PAGE_TABLE
+#endif
+
 /* We actually need a separate PTE page for each CPU.  Remember that after the
  * Switcher code itself comes two pages for each CPU, and we don't want this
  * CPU's guest to see the pages of any other CPU. *...
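For context, _PAGE_TABLE in the kernel headers is the full flag set (_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED | _PAGE_DIRTY), while a PAE top-level entry only carries a meaningful present bit, hence the narrower mask. A hedged sketch of how such a mask would be used to validate a guest's top-level entry (the helper names follow lguest's style; this exact check is assumed, not quoted from the patch):

    /* Reject a guest pgd whose flags fall outside the allowed mask. */
    if (pgd_flags(gpgd) & ~CHECK_GPGD_MASK)
            kill_guest(cpu, "bad page directory entry");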
2009 Sep 21
1
[PATCH 2/5] lguest: use set_pte/set_pmd uniformly for real page table entries
...@ -983,16 +983,15 @@ static unsigned long setup_pagetables(st
         */
        for (i = j = 0; i < mapped_pages && j < PTRS_PER_PMD;
             i += PTRS_PER_PTE, j++) {
-               /* FIXME: native_set_pmd is overkill here. */
-               native_set_pmd(&pmd, __pmd(((unsigned long)(linear + i)
-                       - mem_base) | _PAGE_PRESENT | _PAGE_RW | _PAGE_USER));
+               pmd = pfn_pmd(((unsigned long)&linear[i] - mem_base)/PAGE_SIZE,
+                             __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER));

                if (copy_to_user(&pmds[j], &pmd, sizeof(pmd)) != 0)
                        return -EFAULT;
        }

        /* One PGD entry, pointing to that PMD page. */...
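The replacement builds the entry from a page frame number and a pgprot instead of OR-ing raw bits into a pmd by hand, which is exactly what pfn_pmd() is for. Restated as a two-step sketch (names taken from the excerpt):

    /* Compute the frame number of the PTE page, then let pfn_pmd()
     * assemble the entry from pfn + protection bits. */
    unsigned long pfn = ((unsigned long)&linear[i] - mem_base) / PAGE_SIZE;
    pmd = pfn_pmd(pfn, __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER));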
2007 May 14
0
[PATCH] x86: ptwr adjustments
.../xen/arch/x86/mm.c       2007-05-14 13:43:50.000000000 +0200
+++ 2007-05-14/xen/arch/x86/mm.c        2007-05-14 13:44:25.000000000 +0200
@@ -3238,13 +3238,14 @@ static int ptwr_emulated_update(
     /* We are looking only for read-only mappings of p.t. pages. */
     ASSERT((l1e_get_flags(pte) & (_PAGE_RW|_PAGE_PRESENT)) == _PAGE_PRESENT);
+    ASSERT(mfn_valid(mfn));
     ASSERT((page->u.inuse.type_info & PGT_type_mask) == PGT_l1_page_table);
     ASSERT((page->u.inuse.type_info & PGT_count_mask) != 0);
     ASSERT(page_get_owner(page) == d);

     /* Check the new PTE. */
     nl1e = l1e_from_int...
2005 Mar 14
4
[patch/unstable] page table cleanups
...(unsigned
  * Only propagate to shadow if _PAGE_ACCESSED is set in the guest.
  * Otherwise, to ensure coherency, we blow away the existing shadow value.
  */
-    if ( gpde & _PAGE_ACCESSED )
+    if ( l2_pgentry_val(gpde) & _PAGE_ACCESSED )
     {
-        sl1mfn = (gpde & _PAGE_PRESENT) ?
-            __shadow_status(current->domain, gpde >> PAGE_SHIFT) : 0;
+        sl1mfn = (l2_pgentry_val(gpde) & _PAGE_PRESENT) ?
+            __shadow_status(current->domain, l2_pgentry_val(gpde) >> PAGE_SHIFT) : 0;
         l2pde_general(current->domain, &gpde, &...
2008 Jan 17
1
[PATCH 0/7] More lguest massage.
This series takes one more step towards the cpu-ification of lguest. Following Rusty's last suggestion, I get rid of the whole bunch of "struct lguest *lg = cpu->lg" statements by using lg_cpu as our base structure wherever it matters. (This saves us 11 lines.)
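In sketch form, the refactor trades a rederived pointer for direct use of the per-cpu structure (struct fields and function names here are illustrative, not from the series):

    struct lguest { int dead; };
    struct lg_cpu { struct lguest *lg; int id; };

    /* Before: each function re-derived the owning guest first. */
    static void op_old(struct lg_cpu *cpu)
    {
            struct lguest *lg = cpu->lg;    /* the boilerplate being removed */
            lg->dead = 1;
    }

    /* After: lg_cpu is the base structure; reach lg only where needed. */
    static void op_new(struct lg_cpu *cpu)
    {
            cpu->lg->dead = 1;
    }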
2012 Nov 16
1
[PATCH v4] x86/xen: Use __pa_symbol instead of __pa on C visible symbols
...itial_page_table, PAGE_KERNEL);
        set_page_prot(initial_kernel_pmd, PAGE_KERNEL);
@@ -2036,7 +2037,7 @@ void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn)
        copy_page(initial_page_table, pgd);
        initial_page_table[KERNEL_PGD_BOUNDARY] =
-               __pgd(__pa(initial_kernel_pmd) | _PAGE_PRESENT);
+               __pgd(__pa_symbol(initial_kernel_pmd) | _PAGE_PRESENT);

        set_page_prot(initial_kernel_pmd, PAGE_KERNEL_RO);
        set_page_prot(initial_page_table, PAGE_KERNEL_RO);
@@ -2045,8 +2046,8 @@ void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn)
        pin_pagetable_pfn(MMUEXT_UNPIN_T...
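The distinction being enforced: __pa() translates addresses in the kernel's linear mapping, while __pa_symbol() is the form for linker symbols in the kernel image; using the symbol variant consistently lets configurations where those two mappings differ (or where DEBUG_VIRTUAL checks are enabled) catch misuse. A hedged fragment restating the hunk (kernel macros, not runnable standalone):

    /* initial_kernel_pmd is a symbol in the kernel image, so its physical
     * address comes from __pa_symbol(); __pa() stays for pointers into
     * the linear mapping. */
    initial_page_table[KERNEL_PGD_BOUNDARY] =
            __pgd(__pa_symbol(initial_kernel_pmd) | _PAGE_PRESENT);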
2007 Feb 12
0
[PATCH] lift physical address restriction in save/restore code
...===============================================
--- 2007-02-07.orig/tools/libxc/xc_linux_restore.c      2007-01-17 11:16:20.000000000 +0100
+++ 2007-02-07/tools/libxc/xc_linux_restore.c   2007-02-12 09:06:05.000000000 +0100
@@ -82,7 +82,7 @@ static int uncanonicalize_pagetable(int
         if(!(pte & _PAGE_PRESENT))
             continue;

-        pfn = (pte >> PAGE_SHIFT) & 0xffffffff;
+        pfn = (pte >> PAGE_SHIFT) & MFN_MASK_X86;

         if(pfn >= max_pfn) {
             /* This "page table page" is probably not one; bail. */
@@ -120,12 +120,12 @@...
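The point of the mask change: a 32-bit frame-number mask caps supported machine addresses at 2^(32+12) bytes, i.e. 16TB, so masking the shifted PTE with 0xffffffff silently truncates larger machine frame numbers. A self-contained toy showing the difference (mask widths illustrative; the real width comes from MFN_MASK_X86):

    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SHIFT 12

    int main(void)
    {
        /* A present PTE whose frame number needs more than 32 bits. */
        uint64_t pte    = (0x123456789ull << PAGE_SHIFT) | 0x1;
        uint64_t narrow = (pte >> PAGE_SHIFT) & 0xffffffffull;   /* old: truncates */
        uint64_t wide   = (pte >> PAGE_SHIFT) & 0xffffffffffull; /* new: keeps high bits */

        printf("narrow=%#llx wide=%#llx\n",
               (unsigned long long)narrow, (unsigned long long)wide);
        return 0;
    }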
2008 Sep 26
6
Mapping hvm guest pages in Dom0
Hello, I would like to map (read/write) pages owned by an HVM guest from my Dom0 Linux kernel module. I have access to the "machine frame numbers" of these pages. 1. What is the right interface to do this? kmap needs 'struct page' ptrs, which I doubt exist for pages owned by an HVM guest. Is there a hypercall to do this then? 2. Do I need to modify the HVM behavior in
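Not an answer from the thread, but for comparison: from userspace the established interface for this is libxc's foreign-range mapping, sketched below with the 2008-era signature. The question above is specifically about the in-kernel equivalent, where kmap and 'struct page' don't apply.

    #include <stdint.h>
    #include <sys/mman.h>
    #include <xenctrl.h>

    /* Userspace-only sketch: map one page of guest 'domid' read/write
     * given its machine frame number.  Returns NULL on failure. */
    static void *map_guest_mfn(int xc_handle, uint32_t domid, unsigned long mfn)
    {
            return xc_map_foreign_range(xc_handle, domid, 4096,
                                        PROT_READ | PROT_WRITE, mfn);
    }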
2011 May 06
14
[PATCH 0 of 4] Use superpages on restore/migrate
This patch series restores the use of superpages when restoring or migrating a VM, while retaining efficient batching of 4k pages when superpages are not appropriate or available. Signed-off-by: George Dunlap <george.dunlap@eu.citrix.com>
2012 Mar 01
14
[PATCH 0 of 3] RFC Paging support for AMD NPT V2
There has been some progress, but still no joy. Definitely not intended for inclusion at this point. Tim, Wei, I added a Xen command line toggle to disable IOMMU and P2M table sharing. Tim, I verified that changes to p2m-pt.c don't break shadow mode (64bit hypervisor and Win 7 guest). Hongkaixing, I incorporated your suggestion in patch 2, so I should add your Signed-off-by eventually.
2007 Apr 18
0
[PATCH 2/2] Use page present for pae pdpes
...8:17.000000000 -0700
+++ linux-2.6.13/arch/i386/mm/pgtable.c 2005-08-31 14:48:53.000000000 -0700
@@ -247,14 +247,14 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
                if (!pmd)
                        goto out_oom;
                SetPagePDE(virt_to_page(pmd));
-               set_pgd(&pgd[i], __pgd(1 + __pa(pmd)));
+               set_pgd(&pgd[i], __pgd(_PAGE_PRESENT | __pa(pmd)));
        }
        return pgd;

 out_oom:
        for (i--; i >= 0; i--) {
                ClearPagePDE(pfn_to_page(pgd_val(pgd[i]) >> PAGE_SHIFT));
-               kmem_cache_free(pmd_cache, (void *)__va(pgd_val(pgd[i])-1));
+               kmem_cache_free(pmd_cache, (void *)__va(pgd_val(pgd[i]) & PAGE_MASK));
        }
        kmem_cach...
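Both hunks swap arithmetic tricks for named masks: '1 +' set the present bit only by coincidence of the bit's value, and '-1' only undoes that one bit, whereas '& PAGE_MASK' strips every low flag bit regardless of which are set, and documents the intent. A self-contained toy demonstration:

    #include <stdio.h>

    #define PAGE_MASK      (~0xfffUL)
    #define _PAGE_PRESENT  0x001UL
    #define _PAGE_ACCESSED 0x020UL   /* another flag bit, for illustration */

    int main(void)
    {
        unsigned long entry = 0x1234000UL | _PAGE_PRESENT | _PAGE_ACCESSED;

        printf("entry - 1:         %#lx\n", entry - 1);         /* extra flag survives */
        printf("entry & PAGE_MASK: %#lx\n", entry & PAGE_MASK); /* clean page address */
        return 0;
    }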
2011 Mar 09
0
[PATCH 1/5] x86: don't BUG() post-boot in alloc_xen_pagetable()
..., 0);
-        BUG_ON(pg == NULL);
-        return page_to_virt(pg);
+
+        BUG_ON(!dom0 && !pg);
+        return pg ? page_to_virt(pg) : NULL;
     }

     mfn = alloc_boot_pages(1, 1);
@@ -100,6 +101,9 @@ l3_pgentry_t *virt_to_xen_l3e(unsigned l
     if ( !(l4e_get_flags(*pl4e) & _PAGE_PRESENT) )
     {
         l3_pgentry_t *pl3e = alloc_xen_pagetable();
+
+        if ( !pl3e )
+            return NULL;
         clear_page(pl3e);
         l4e_write(pl4e, l4e_from_paddr(__pa(pl3e), __PAGE_HYPERVISOR));
     }
@@ -112,9 +116,15 @@ l2_pgentry_t *virt_to_xen_l2e(unsigned l
     l3_pgentry_t...