search for: pgd_index

Displaying 10 results from an estimated 72 matches for "pgd_index".

2010 Sep 14 · 2 · [PATCH 1/4] x86: remove cast from void*
...m> --- arch/x86/xen/mmu.c | 2 +- 1 files changed, 1 insertions(+), 1 deletions(-) diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c index 42086ac..7436283 100644 --- a/arch/x86/xen/mmu.c +++ b/arch/x86/xen/mmu.c @@ -737,7 +737,7 @@ pgd_t *xen_get_user_pgd(pgd_t *pgd) if (offset < pgd_index(USER_LIMIT)) { struct page *page = virt_to_page(pgd_page); - user_ptr = (pgd_t *)page->private; + user_ptr = page->private; if (user_ptr) user_ptr += offset; } -- 1.7.2.1
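
Context for this result: the cleanup relies on a basic C rule, namely that a `void *` value converts implicitly to any object pointer type, so an explicit `(pgd_t *)` cast on such an assignment is noise. Note this only holds if `page->private` really has pointer type; from an integer field the cast would still be required. A minimal standalone sketch of the rule, with hypothetical stand-in types:

```c
#include <stdio.h>

/* Hypothetical stand-ins, just to illustrate the C rule the cleanup
 * relies on; this is not the kernel's struct page. */
typedef unsigned long pgd_t;

int main(void)
{
    pgd_t entry = 0;
    void *private = &entry;     /* plays the role of a void* page->private */

    pgd_t *user_ptr = private;  /* implicit void* -> pgd_t* conversion:
                                 * no cast needed in C (C++ would require one) */
    printf("%p\n", (void *)user_ptr);
    return 0;
}
```
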
2012 Nov 16 · 1 · [PATCH v4] x86/xen: Use __pa_symbol instead of __pa on C visible symbols
...-- 1 files changed, 11 insertions(+), 10 deletions(-) diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c index 4a05b39..a63e5f9 100644 --- a/arch/x86/xen/mmu.c +++ b/arch/x86/xen/mmu.c @@ -1486,7 +1486,8 @@ static int xen_pgd_alloc(struct mm_struct *mm) if (user_pgd != NULL) { user_pgd[pgd_index(VSYSCALL_START)] = - __pgd(__pa(level3_user_vsyscall) | _PAGE_TABLE); + __pgd(__pa_symbol(level3_user_vsyscall) | + _PAGE_TABLE); ret = 0; } @@ -1958,10 +1959,10 @@ void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn) * pgd. */ if (xen_feature(XE...
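
The point of the change: `__pa()` is meant for addresses in the kernel's linear mapping, while `__pa_symbol()` is the variant for symbols linked into the kernel image, which on x86-64 live in a separate high mapping. The index arithmetic is unchanged; as a sketch of what `pgd_index(VSYSCALL_START)` evaluates to (the paging constants and the VSYSCALL_START value are assumptions matching the x86-64 4-level layout of that era):

```c
#include <stdio.h>

/* Assumed x86-64 4-level paging constants and the usual pgd_index()
 * definition. */
#define PGDIR_SHIFT   39
#define PTRS_PER_PGD  512UL
#define pgd_index(addr) (((addr) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1))

int main(void)
{
    unsigned long vsyscall_start = 0xffffffffff600000UL; /* assumed value */

    /* The vsyscall page sits in the very last PGD slot (index 511),
     * which is why the user PGD needs this one kernel-side entry. */
    printf("pgd_index(VSYSCALL_START) = %lu\n", pgd_index(vsyscall_start));
    return 0;
}
```
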
2009 Apr 16 · 1 · NULL pointer dereference at __switch_to() ( __unlazy_fpu ) with lguest PAE patch
...@@ * (vii) Setting up the page tables initially. :*/ +void guest_pagetable_clear_all(struct lg_cpu *cpu); /* 1024 entries in a page table page maps 1024 pages: 4MB. The Switcher is * conveniently placed at the top 4MB, so it uses a separate, complete PTE * page. */ #define SWITCHER_PGD_INDEX (PTRS_PER_PGD - 1) +/* For PAE we need the PMD index as well. We can use the last 2MB, so we + * will need the last pmd entry of the last pmd page. */ +#ifdef CONFIG_X86_PAE + +#define SWITCHER_PMD_INDEX (PTRS_PER_PMD - 1) +#endif + /* We actually need a separate PTE page for each CPU. Remember...
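
The defines place the lguest Switcher at the very top of the guest address space: the last PGD slot, and under PAE additionally the last PMD entry, covering the top 2MB. A sketch of that arithmetic, assuming standard 32-bit PAE constants:

```c
#include <stdio.h>

/* 32-bit PAE paging constants (assumption: standard x86 PAE layout). */
#define PTRS_PER_PGD   4
#define PTRS_PER_PMD   512
#define PMD_SIZE       (1UL << 21)   /* each PMD entry maps 2MB */

#define SWITCHER_PGD_INDEX (PTRS_PER_PGD - 1)
#define SWITCHER_PMD_INDEX (PTRS_PER_PMD - 1)

int main(void)
{
    /* Virtual address covered by the last PMD entry of the last PGD slot:
     * the top 2MB of the 4GB address space, as the patch comment says. */
    unsigned long base = (unsigned long)SWITCHER_PGD_INDEX * PTRS_PER_PMD * PMD_SIZE
                       + (unsigned long)SWITCHER_PMD_INDEX * PMD_SIZE;
    printf("switcher slot starts at %#lx\n", base); /* 0xffe00000 = 4GB - 2MB */
    return 0;
}
```
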
2009 Jun 05 · 1 · [PATCH] lguest: PAE support
...lg_cpu *cpu); void guest_set_pte(struct lg_cpu *cpu, unsigned long gpgdir, diff --git a/drivers/lguest/page_tables.c b/drivers/lguest/page_tables.c index 8a3c3a5..a03b74b 100644 --- a/drivers/lguest/page_tables.c +++ b/drivers/lguest/page_tables.c @@ -53,6 +53,17 @@ * page. */ #define SWITCHER_PGD_INDEX (PTRS_PER_PGD - 1) +/* For PAE we need the PMD index as well. We use the last 2MB, so we + * will need the last pmd entry of the last pmd page. */ +#ifdef CONFIG_X86_PAE +#define SWITCHER_PMD_INDEX (PTRS_PER_PMD - 1) +#define RESERVE_MEM 2U +#define CHECK_GPGD_MASK _PAGE_PRESENT +#else +#def...
2020 Jul 22 · 0 · [PATCH v4 51/75] x86/sev-es: Handle MMIO events
...fea7a38019 100644 --- a/arch/x86/kernel/sev-es.c +++ b/arch/x86/kernel/sev-es.c @@ -389,7 +389,8 @@ static bool vc_slow_virt_to_phys(struct ghcb *ghcb, struct es_em_ctxt *ctxt, pgd_t *pgd; pte_t *pte; - pgd = pgd_offset(current->active_mm, va); + pgd = __va(read_cr3_pa()); + pgd = &pgd[pgd_index(va)]; pte = lookup_address_in_pgd(pgd, va, &level); if (!pte) { ctxt->fi.vector = X86_TRAP_PF;
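
The replaced line matters because, in the #VC exception handler, `current->active_mm` may not correspond to the page table actually loaded in CR3; reading CR3 directly and indexing it with `pgd_index()` walks the live table. A userspace-flavoured sketch of the indexing step (`read_cr3_pa()` and `__va()` are real kernel helpers; the stubs below are assumptions so the sketch runs anywhere):

```c
#include <stdio.h>
#include <stdint.h>

typedef uint64_t pgd_t;

#define PGDIR_SHIFT   39
#define PTRS_PER_PGD  512UL
#define pgd_index(addr) (((addr) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1))

/* Stand-ins: in the kernel, read_cr3_pa() returns the physical address
 * held in CR3 and __va() maps it through the direct map. */
static pgd_t fake_pgd_page[PTRS_PER_PGD];
static pgd_t *va_of_cr3(void) { return fake_pgd_page; }

int main(void)
{
    unsigned long va = 0xffffffff81000000UL; /* some kernel-range address */

    pgd_t *pgd = va_of_cr3();       /* pgd = __va(read_cr3_pa()); */
    pgd = &pgd[pgd_index(va)];      /* select the PGD entry covering va */

    printf("entry index %lu, value %#llx\n",
           pgd_index(va), (unsigned long long)*pgd);
    return 0;
}
```
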
2007 Apr 18 · 1 · [RFC/PATCH LGUEST X86_64 03/13] lguest64 core
...+ +/* guest and host share the same offset into the page tables */ +/* 9 bits at 8 byte increments */ +#define guest_host_idx(vaddr) ((vaddr) & (0x1ff<<3)) + + +/* These access the guest versions. */ +static u64 gtoplev(struct lguest_vcpu *vcpu, unsigned long vaddr) +{ + unsigned index = pgd_index(vaddr); + + return vcpu->pgdir->cr3 + index * sizeof(u64); +} + + +#if 0 + +/* FIXME: we need to put these in and make it more secure! */ +static u32 check_pgtable_entry(struct lguest *lg, u32 entry) +{ + if ((entry & (_PAGE_PWT|_PAGE_PSE)) + || (entry >> PAGE_SHIFT) >= lg-&g...
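
`gtoplev()` converts a guest virtual address into the guest-physical address of its top-level page-table entry: `pgd_index()` extracts the top 9 index bits on x86-64, and each entry is 8 bytes, offset from the table base held in the guest's cr3. A runnable sketch of the same arithmetic (`gpa_of_cr3` is a hypothetical stand-in for `vcpu->pgdir->cr3`):

```c
#include <stdio.h>
#include <stdint.h>

#define PGDIR_SHIFT   39
#define PTRS_PER_PGD  512UL
#define pgd_index(addr) (((addr) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1))

/* Mimics gtoplev(): the guest cr3 holds the guest-physical base of the
 * top-level table; the entry for vaddr sits pgd_index * 8 bytes in. */
static uint64_t gtoplev_sketch(uint64_t gpa_of_cr3, unsigned long vaddr)
{
    return gpa_of_cr3 + pgd_index(vaddr) * sizeof(uint64_t);
}

int main(void)
{
    /* index 511 -> offset 511 * 8 = 0xff8 into the table page */
    printf("%#llx\n",
           (unsigned long long)gtoplev_sketch(0x1000, 0xffffffff81000000UL));
    return 0;
}
```
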
2012 Dec 27 · 30 · [PATCH v3 00/11] xen: Initial kexec/kdump implementation
Hi, This set of patches contains the initial kexec/kdump implementation for Xen, v3. Currently only dom0 is supported; however, almost all infrastructure required for domU support is ready. Jan Beulich suggested merging the Xen x86 assembler code with the baremetal x86 code. This could simplify the code and slightly reduce the kernel's size. However, this solution requires some changes in the baremetal x86 code. First of
2020 Aug 24 · 0 · [PATCH v6 52/76] x86/sev-es: Handle MMIO events
...EPTION; } +static bool vc_slow_virt_to_phys(struct ghcb *ghcb, struct es_em_ctxt *ctxt, + unsigned long vaddr, phys_addr_t *paddr) +{ + unsigned long va = (unsigned long)vaddr; + unsigned int level; + phys_addr_t pa; + pgd_t *pgd; + pte_t *pte; + + pgd = __va(read_cr3_pa()); + pgd = &pgd[pgd_index(va)]; + pte = lookup_address_in_pgd(pgd, va, &level); + if (!pte) { + ctxt->fi.vector = X86_TRAP_PF; + ctxt->fi.cr2 = vaddr; + ctxt->fi.error_code = 0; + + if (user_mode(ctxt->regs)) + ctxt->fi.error_code |= X86_PF_USER; + + return false; + } + + pa = (phys_addr...
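
After the walk succeeds, the remaining step (truncated in the excerpt above) is the usual combination of the page frame from the PTE with the in-page offset of the original address. A generic sketch of that combination, not the patch's exact code:

```c
#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT 12
#define PAGE_MASK  (~((1UL << PAGE_SHIFT) - 1))

/* Generic pfn + offset combination; a 4KB leaf is assumed here, whereas
 * the real code must also handle the 2MB/1GB leaf levels reported by
 * lookup_address_in_pgd(). */
static uint64_t phys_of(uint64_t pfn, unsigned long va)
{
    return (pfn << PAGE_SHIFT) | (va & ~PAGE_MASK);
}

int main(void)
{
    /* e.g. pfn 0x1234 mapping an address ending in 0x567 -> 0x1234567 */
    printf("%#llx\n", (unsigned long long)phys_of(0x1234, 0xffff888000000567UL));
    return 0;
}
```
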
2007 Apr 18 · 0 · [RFC/PATCH PV_OPS X86_64 08/17] paravirt_ops - memory management
...4/mm/fault.c +++ clean-start/arch/x86_64/mm/fault.c @@ -180,7 +180,7 @@ void dump_pagetable(unsigned long addres pmd_t *pmd; pte_t *pte; - asm("movq %%cr3,%0" : "=r" (pgd)); + pgd = (pgd_t *)read_cr3(); pgd = __va((unsigned long)pgd & PHYSICAL_PAGE_MASK); pgd += pgd_index(address); @@ -347,7 +347,7 @@ asmlinkage void __kprobes do_page_fault( prefetchw(&mm->mmap_sem); /* get the address */ - __asm__("movq %%cr2,%0":"=r" (address)); + address = read_cr2(); info.si_code = SEGV_MAPERR; Index: clean-start/arch/x86_64/mm/init.c =====...
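
The patch replaces raw inline assembly with `read_cr2()`/`read_cr3()` accessors so a paravirt backend can hook control-register reads instead of executing the privileged instruction directly. A sketch of that indirection pattern; the names and ops-table layout here are illustrative, not the kernel's actual paravirt_ops:

```c
#include <stdio.h>

/* Control-register reads go through an ops table so a hypervisor
 * backend can replace the native version. */
struct pv_cpu_ops_sketch {
    unsigned long (*read_cr2)(void);
    unsigned long (*read_cr3)(void);
};

/* A "native" backend would use inline asm, e.g.
 *   asm("movq %%cr3,%0" : "=r"(val));
 * here we fake the values so the sketch runs in user mode. */
static unsigned long fake_read_cr2(void) { return 0xdeadbeef; }
static unsigned long fake_read_cr3(void) { return 0x1000; }

static struct pv_cpu_ops_sketch ops = {
    .read_cr2 = fake_read_cr2,
    .read_cr3 = fake_read_cr3,
};

static unsigned long read_cr2(void) { return ops.read_cr2(); }
static unsigned long read_cr3(void) { return ops.read_cr3(); }

int main(void)
{
    printf("cr2=%#lx cr3=%#lx\n", read_cr2(), read_cr3());
    return 0;
}
```
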
2007 Jun 15 · 11 · [PATCH 00/10] paravirt/subarchitecture boot protocol
This series updates the boot protocol to 2.07 and uses it to implement paravirtual booting. This allows the bootloader to tell the kernel what kind of hardware/pseudo-hardware environment it's coming up under, and the kernel can use the appropriate boot sequence code. Specifically: - Update the boot protocol to 2.07, which adds fields to specify the hardware subarchitecture and some
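
The fields in question: boot protocol 2.07 added a subarchitecture tag plus a subarch-specific data value to the x86 setup header, which the bootloader fills in so early boot code can pick the right entry path. A sketch of the shape of those fields; the names follow Documentation/x86/boot, while the struct layout and the concrete enum value are assumptions for illustration:

```c
#include <stdint.h>
#include <stdio.h>

/* Sketch of the two fields boot protocol 2.07 added to the x86 setup
 * header (real struct layout and offsets omitted). */
struct setup_header_v207 {
    uint32_t hardware_subarch;      /* 0 = default PC/BIOS platform */
    uint64_t hardware_subarch_data; /* subarch-specific data pointer */
};

int main(void)
{
    /* A bootloader for a paravirtual platform would tag itself here;
     * the value 1 is an assumed non-default subarch id. */
    struct setup_header_v207 hdr = {
        .hardware_subarch      = 1,
        .hardware_subarch_data = 0,
    };
    printf("hardware_subarch = %u\n", hdr.hardware_subarch);
    return 0;
}
```
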