Displaying 20 results from an estimated 49 matches for "pgd_offset".
2020 May 20
1
[PATCH v3 51/75] x86/sev-es: Handle MMIO events
...*ctxt,
> return ES_EXCEPTION;
> }
>
> +static phys_addr_t vc_slow_virt_to_phys(struct ghcb *ghcb, unsigned long vaddr)
> +{
> + unsigned long va = (unsigned long)vaddr;
> + unsigned int level;
> + phys_addr_t pa;
> + pgd_t *pgd;
> + pte_t *pte;
> +
> + pgd = pgd_offset(current->active_mm, va);
> + pte = lookup_address_in_pgd(pgd, va, &level);
> + if (!pte)
> + return 0;
'0' is a valid physical address. It happens to be reserved in the kernel
thanks to L1TF, but using '0' as an error code is ugly. Not to mention
none of the call...
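[Editor's note: the next result (the v4 posting) addresses this review comment by returning a success flag and reporting failures through the emulation context rather than a magic physical address. A sketch of that shape, reconstructed from the hunks visible in these results (field names assumed to match the kernel's struct es_fault_info):]

static bool vc_slow_virt_to_phys(struct ghcb *ghcb, struct es_em_ctxt *ctxt,
				 unsigned long vaddr, phys_addr_t *paddr)
{
	unsigned long va = (unsigned long)vaddr;
	unsigned int level;
	pgd_t *pgd;
	pte_t *pte;

	pgd = __va(read_cr3_pa());
	pgd = &pgd[pgd_index(va)];
	pte = lookup_address_in_pgd(pgd, va, &level);
	if (!pte) {
		/* Report a page fault to the caller; 0 stays a valid address */
		ctxt->fi.vector     = X86_TRAP_PF;
		ctxt->fi.cr2        = vaddr;
		ctxt->fi.error_code = 0;
		return false;
	}

	*paddr = ((phys_addr_t)pte_pfn(*pte) << PAGE_SHIFT) |
		 (va & ~page_level_mask(level));

	return true;
}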
2020 Jul 22
0
[PATCH v4 51/75] x86/sev-es: Handle MMIO events
...ff --git a/arch/x86/kernel/sev-es.c b/arch/x86/kernel/sev-es.c
index 251d0aabc55a..e1fea7a38019 100644
--- a/arch/x86/kernel/sev-es.c
+++ b/arch/x86/kernel/sev-es.c
@@ -389,7 +389,8 @@ static bool vc_slow_virt_to_phys(struct ghcb *ghcb, struct es_em_ctxt *ctxt,
pgd_t *pgd;
pte_t *pte;
- pgd = pgd_offset(current->active_mm, va);
+ pgd = __va(read_cr3_pa());
+ pgd = &pgd[pgd_index(va)];
pte = lookup_address_in_pgd(pgd, va, &level);
if (!pte) {
ctxt->fi.vector = X86_TRAP_PF;
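[Editor's note: the replacement reads the paging root straight from CR3 rather than from current->active_mm. Inside the #VC exception handler, active_mm can disagree with the page tables actually loaded (for example around context switches), while CR3 is by definition what the MMU is walking. A minimal sketch of the idiom, with a hypothetical helper name:]

/* Hypothetical helper illustrating the idiom: read_cr3_pa() strips the
 * flag bits from CR3, leaving the physical address of the live PGD;
 * __va() converts that to a kernel virtual pointer for indexing. */
static pgd_t *live_pgd_entry(unsigned long va)
{
	pgd_t *pgd = __va(read_cr3_pa());

	return &pgd[pgd_index(va)];
}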
2007 Apr 18
0
[PATCH 7/9] 00mma remove set pte atomic.patch
...-frv/pgtable.h
+++ b/include/asm-frv/pgtable.h
@@ -175,8 +175,6 @@ do { \
asm volatile("dcf %M0" :: "U"(*pteptr)); \
} while(0)
#define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval)
-
-#define set_pte_atomic(pteptr, pteval) set_pte((pteptr), (pteval))
/*
* pgd_offset() returns a (pgd_t *)
===================================================================
--- a/include/asm-generic/pgtable.h
+++ b/include/asm-generic/pgtable.h
@@ -13,19 +13,11 @@
* Note: the old pte is known to not be writable, so we don't need to
* worry about dirty bits etc getting los...
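[Editor's note: the frv comment cut off above documents the usual contract: pgd_offset() takes an mm and a virtual address and returns a pointer into that mm's top-level table. The conventional definition pattern, which varies per architecture, so treat this as a sketch:]

#define pgd_index(addr)		(((addr) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1))
#define pgd_offset(mm, addr)	((mm)->pgd + pgd_index(addr))
#define pgd_offset_k(addr)	pgd_offset(&init_mm, (addr))	/* kernel mappings */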
2020 Apr 28
0
[PATCH v3 51/75] x86/sev-es: Handle MMIO events
...4,25 @@ static enum es_result vc_read_mem(struct es_em_ctxt *ctxt,
return ES_EXCEPTION;
}
+static phys_addr_t vc_slow_virt_to_phys(struct ghcb *ghcb, unsigned long vaddr)
+{
+ unsigned long va = (unsigned long)vaddr;
+ unsigned int level;
+ phys_addr_t pa;
+ pgd_t *pgd;
+ pte_t *pte;
+
+ pgd = pgd_offset(current->active_mm, va);
+ pte = lookup_address_in_pgd(pgd, va, &level);
+ if (!pte)
+ return 0;
+
+ pa = (phys_addr_t)pte_pfn(*pte) << PAGE_SHIFT;
+ pa |= va & ~page_level_mask(level);
+
+ return pa;
+}
+
/* Include code shared with pre-decompression boot stage */
#include "...
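[Editor's note: the address assembly at the end deserves a word. pte_pfn() << PAGE_SHIFT yields the base of the page the walk resolved to, and because `level` reports where the walk stopped, va & ~page_level_mask(level) keeps exactly the untranslated low bits as the in-page offset, which also handles huge pages. A worked example with assumed values:]

/* Assumed values for a 2 MB mapping (level == PG_LEVEL_2M, 21 offset bits):
 *
 *   pte_pfn(*pte)              = 0x40200      (512-aligned for a 2M page)
 *   base = pfn << PAGE_SHIFT   = 0x40200000
 *   ~page_level_mask(level)    = 0x1fffff     (in-page offset bits)
 *   va                         = 0xffff888001234567
 *   offset = va & 0x1fffff     = 0x034567
 *   pa     = base | offset     = 0x40234567
 */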
1997 Nov 13
0
Linux F00F Patch [Forwarded e-mail from Aleph One]
...(twopage + 4096-7*8);
+
+ memcpy(idt2,&idt,sizeof(idt));
+
+ idt_d.limit = 256*8-1;
+ idt_d.addr = idt2;
+ idt_d.__pad = 0;
+
+ __asm__ __volatile__("\tlidt %0": "=m" (idt_d));
+
+ /*
+ * Unmap lower page:
+ */
+ pgd = pgd_offset(current->mm, twopage);
+ pmd = pmd_offset(pgd, twopage);
+ pte = pte_offset(pmd, twopage);
+
+ pte_clear(pte);
+ flush_tlb_all();
+
+ printk(" ... done\n");
+}
+
+
+
__initfunc(void trap_init(void))
{
int i;
diff -u --recursive --new-file v2.1.62...
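[Editor's note: for context on this forwarded workaround, the F00F erratum makes a locked cmpxchg8b raise vector 6 (#UD) while the bus is wedged. The fix copies the IDT so that descriptors 0..6 land in a page that is then unmapped via the pgd/pmd/pte walk shown; fetching the #UD descriptor now takes a page fault instead, which the fault handler can recognize by address and turn into a safe invalid-opcode trap. A reconstruction of the layout implied by idt2 = twopage + 4096 - 7*8:]

/*
 *   |---- page 1 (unmapped) ----|---- page 2 (mapped) ----|
 *                    [d0 .. d6] | [d7 .............. d255]
 *                    ^ idt2 = twopage + 4096 - 7*8
 *
 * Each descriptor is 8 bytes, so the first 7 vectors, including #UD,
 * sit in the unmapped page and fault on delivery.
 */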
2020 Nov 06
0
[PATCH v3 3/6] mm: support THP migration to device private memory
...e extra reference */
if (is_zone_device_page(page)) {
/*
@@ -2833,13 +2908,191 @@ int migrate_vma_setup(struct migrate_vma *args)
}
EXPORT_SYMBOL(migrate_vma_setup);
+static pmd_t *find_pmd(struct mm_struct *mm, unsigned long addr)
+{
+ pgd_t *pgdp;
+ p4d_t *p4dp;
+ pud_t *pudp;
+
+ pgdp = pgd_offset(mm, addr);
+ p4dp = p4d_alloc(mm, pgdp, addr);
+ if (!p4dp)
+ return NULL;
+ pudp = pud_alloc(mm, p4dp, addr);
+ if (!pudp)
+ return NULL;
+ return pmd_alloc(mm, pudp, addr);
+}
+
+#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
+/*
+ * This code closely follows:
+ * do_huge_pmd_anonymous_page()
+ * __...
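[Editor's note: find_pmd() differs from a pure lookup in that it allocates the intermediate p4d and pud tables on demand, so the caller always gets back a PMD slot, or NULL on allocation failure. A hypothetical caller, sketched to show the intended use:]

static int install_example(struct mm_struct *mm, unsigned long addr)
{
	pmd_t *pmdp = find_pmd(mm, addr);

	if (!pmdp)
		return -ENOMEM;	/* allocating an intermediate table failed */

	if (pmd_none(*pmdp)) {
		/* empty slot: a PMD-sized (2 MB on x86) entry can go here */
	}

	return 0;
}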
2019 Mar 30
1
[PATCH 2/5] x86: Convert some slow-path static_cpu_has() callers to boot_cpu_has()
..._pmd_walk(u_pgd, LDT_BASE_ADDR);
- if (static_cpu_has(X86_FEATURE_PTI) && !mm->context.ldt)
+ if (boot_cpu_has(X86_FEATURE_PTI) && !mm->context.ldt)
set_pmd(u_pmd, *k_pmd);
}
@@ -181,7 +181,7 @@ static void map_ldt_struct_to_user(struct mm_struct *mm)
{
pgd_t *pgd = pgd_offset(mm, LDT_BASE_ADDR);
- if (static_cpu_has(X86_FEATURE_PTI) && !mm->context.ldt)
+ if (boot_cpu_has(X86_FEATURE_PTI) && !mm->context.ldt)
set_pgd(kernel_to_user_pgdp(pgd), *pgd);
}
@@ -208,7 +208,7 @@ map_ldt_struct(struct mm_struct *mm, struct ldt_struct *ldt, int slot)...
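[Editor's note: the conversion is about mechanism, not semantics. static_cpu_has() is patched at runtime via the alternatives machinery so hot paths pay only a direct branch, while boot_cpu_has() is a plain bit test on boot_cpu_data, which is all a slow path like LDT setup needs. A sketch of a call site (hypothetical function, real predicates):]

static void ldt_example(struct mm_struct *mm)
{
	/* Slow path: an ordinary bit test on the capability bitmap is
	 * cheap enough here and avoids the patching machinery. */
	if (boot_cpu_has(X86_FEATURE_PTI) && !mm->context.ldt)
		pr_debug("PTI active, no LDT for this mm\n");
}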
2006 Mar 14
12
[RFC] VMI for Xen?
I'm sure everyone has seen the drop of VMI patches for Linux at this
point, but just in case, the link is included below.
I've read this version of the VMI spec and have made my way through most
of the patches. While I wasn't really that impressed with the first
spec wrt Xen, the second version seems to be much more palatable.
Specifically, the code inlining and
2008 Feb 25
6
[PATCH 0/4] ia64/xen: paravirtualization of hand written assembly code
Hi. The patch I sent before was too large, so it was dropped from
the mailing list. I'm sending it again in smaller pieces.
This patch set is the Xen paravirtualization of the hand-written assembly
code, and I expect that much cleanup will be necessary before merging.
We really need the feedback before starting the actual cleanup, as Eddie
already said before.
Eddie discussed how to clean up and suggested
2020 Nov 06
12
[PATCH v3 0/6] mm/hmm/nouveau: add THP migration to migrate_vma_*
This series adds support for transparent huge page migration to
migrate_vma_*() and adds nouveau SVM and HMM selftests as consumers.
Earlier versions were posted previously [1] and [2].
The patches apply cleanly to the linux-mm 5.10.0-rc2 tree. There are a
lot of other THP patches being posted. I don't think there are any
semantic conflicts but there may be some merge conflicts depending on
2020 Sep 02
10
[PATCH v2 0/7] mm/hmm/nouveau: add THP migration to migrate_vma_*
This series adds support for transparent huge page migration to
migrate_vma_*() and adds nouveau SVM and HMM selftests as consumers.
An earlier version was posted previously [1]. This version now
supports splitting a THP midway in the migration process which
led to a number of changes.
The patches apply cleanly to the current linux-mm tree. Since there
are a couple of patches in linux-mm from Dan
2019 Nov 08
15
[PATCH 00/13] Finish off [smp_]read_barrier_depends()
Hi all,
Although [smp_]read_barrier_depends() became part of READ_ONCE() in
commit 76ebbe78f739 ("locking/barriers: Add implicit
smp_read_barrier_depends() to READ_ONCE()"), it still limps on in the
Linux memory model with the sinister hope of attracting innocent new
users so that it becomes impossible to remove altogether.
Let's strike before it's too late: there's only
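[Editor's note: for concreteness, the pattern being retired and its replacement might look like the sketch below (hypothetical shared structure; commit 76ebbe78f739 folded the barrier into READ_ONCE(), and only Alpha ever needed a real instruction for it):]

struct foo { int val; };
struct foo *shared;	/* hypothetical pointer published by a writer */

int reader_old(void)
{
	struct foo *q = shared;
	smp_read_barrier_depends();	/* order the dependent dereference */
	return q->val;
}

int reader_new(void)
{
	struct foo *q = READ_ONCE(shared);	/* dependency barrier implied */
	return q->val;
}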
2007 Apr 18
15
[PATCH 0 of 13] Basic infrastructure patches for a paravirtualized kernel
[ REPOST: Apologies to anyone who has seen this before. It
didn't make it onto any of the lists it should have. -J ]
Hi Andrew,
This series of patches lays the basic ground work for the
paravirtualized kernel patches coming later on. I think this lot is
ready for the rough-and-tumble world of the -mm tree.
For the most part, these patches do nothing or very little. The
patches should
2020 Jun 19
22
[PATCH 00/16] mm/hmm/nouveau: THP mapping and migration
These patches apply to linux-5.8.0-rc1. Patches 1-3 should probably go
into 5.8; the others can be queued for 5.9. Patches 4-6 improve the HMM
self tests. Patches 7-8 prepare nouveau for the meat of this series, which
adds support and testing for compound page mapping of system memory
(patches 9-11) and compound page migration to device private memory
(patches 12-16). Since these changes are split
2007 Apr 18
23
[patch 00/20] paravirt_ops updates
Hi Andi,
Here's a repost of the paravirt_ops update series I posted the other day.
Since then, I found a few potential bugs with patching clobbering,
cleaned up and documented paravirt.h and the patching machinery.
Overview:
add-MAINTAINERS.patch
obvious
remove-CONFIG_DEBUG_PARAVIRT.patch
No longer meaningful or needed.
paravirt-nop.patch
Clean up nop paravirt_ops functions, mainly to
2020 Jul 14
92
[PATCH v4 00/75] x86: SEV-ES Guest Support
From: Joerg Roedel <jroedel at suse.de>
Hi,
here is the fourth version of the SEV-ES Guest Support patches. I
addressed the review comments sent to me for the previous version and
rebased the code onto v5.8-rc5.
The biggest change in this version is the IST handling code for the
#VC handler. I adapted the entry code for the #VC handler to the big
pile of entry code changes merged into