Displaying 20 results from an estimated 51 matches for "arch_enter_lazy_mmu_mod".
2012 Apr 10
7
[PATCH v3 1/2] xen: enter/exit lazy_mmu_mode around m2p_override calls
...= NULL. Batching all the calls together
is a great performance benefit because it means issuing one hypercall
in total rather than two hypercalls per page.
If paravirt_lazy_mode is set to PARAVIRT_LAZY_MMU, all these calls are
batched together; otherwise they are issued one at a time.
Adding arch_enter_lazy_mmu_mode/arch_leave_lazy_mmu_mode around the
m2p_add/remove_override calls forces paravirt_lazy_mode to
PARAVIRT_LAZY_MMU and therefore ensures that they are always batched.
Changes in v3:
- do not call arch_enter/leave_lazy_mmu_mode in xen_blkbk_unmap, which
can be called in interrupt context.
Signed-o...
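A minimal sketch of the batching pattern the patch introduces (the surrounding loop and variable names are illustrative, not the actual grant-table code):
	/*
	 * Entering lazy MMU mode forces paravirt_lazy_mode to PARAVIRT_LAZY_MMU,
	 * so each m2p_add_override() below queues its work instead of issuing
	 * hypercalls immediately; everything is flushed in one go on leave.
	 */
	arch_enter_lazy_mmu_mode();
	for (i = 0; i < count; i++)
		ret |= m2p_add_override(mfn[i], pages[i],
					kmap_ops ? &kmap_ops[i] : NULL);
	arch_leave_lazy_mmu_mode();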
2007 Apr 18
0
[PATCH 3/9] 00mm3 lazy mmu mode hooks.patch
...entered and left under the protection of
+ * the page table locks for all page tables which may be modified. In the UP
+ * case, this is required so that preemption is disabled, and in the SMP case,
+ * it must synchronize the delayed page table writes properly on other CPUs.
+ */
+#ifndef __HAVE_ARCH_ENTER_LAZY_MMU_MODE
+#define arch_enter_lazy_mmu_mode() do {} while (0)
+#define arch_leave_lazy_mmu_mode() do {} while (0)
+#endif
+
+/*
* When walking page tables, get the address of the next boundary,
* or the end address of the range if that comes earlier. Although no
* vma end wraps to 0, rounded up __bou...
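The comment above spells out the contract; a simplified sketch of the intended caller pattern (the real users are the mm page-table walkers such as zap_pte_range, so this is illustrative rather than quoted code):
	pte_t *pte;
	spinlock_t *ptl;

	pte = pte_offset_map_lock(mm, pmd, addr, &ptl);	/* PTE lock held, preemption off */
	arch_enter_lazy_mmu_mode();			/* start batching PTE updates */
	do {
		/* read/modify PTEs; a hypervisor back end may queue the writes */
	} while (pte++, addr += PAGE_SIZE, addr != end);
	arch_leave_lazy_mmu_mode();			/* flush any queued updates */
	pte_unmap_unlock(pte - 1, ptl);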
2007 Apr 18
2
pte_offset_map + lazy mmu
...y got a patch which turns
this into a pv_op, along with a Xen implementation. But I think it's
probably an excess pv_op for a relatively minor corner case. It seems
to me that it would be better to define kpte_clear_flush as:
#define kpte_clear_flush(ptep, vaddr) \
do { \
arch_enter_lazy_mmu_mode(); \
pte_clear(&init_mm, vaddr, ptep); \
__flush_tlb_one(vaddr); \
arch_leave_lazy_mmu_mode(); \
} while (0)
and take advantage of mmu batching to make this operation efficient.
But I'm not sure if this is safe.
(Also, kmap_atomic could use set_...
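For reference, the i386 highmem code of that era calls the macro from the kunmap path roughly like this (a from-memory sketch of kunmap_atomic(), not a quote from the thread):
	unsigned long vaddr = (unsigned long)kvaddr & PAGE_MASK;
	enum fixed_addresses idx = type + KM_TYPE_NR * smp_processor_id();

	if (vaddr == __fix_to_virt(FIX_KMAP_BEGIN + idx))
		kpte_clear_flush(kmap_pte - idx, vaddr);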
2014 Feb 27
3
[PATCH] xen/grant-table: Refactor gnttab_[un]map_refs to avoid m2p_override
...ap_ops,
+ struct page **pages, unsigned int count)
+{
+ int i, ret = 0;
+ bool lazy = false;
+ pte_t *pte;
+
+ if (xen_feature(XENFEAT_auto_translated_physmap))
+ return 0;
+
+ if (kmap_ops &&
+ !in_interrupt() &&
+ paravirt_get_lazy_mode() == PARAVIRT_LAZY_NONE) {
+ arch_enter_lazy_mmu_mode();
+ lazy = true;
+ }
+
+ for (i = 0; i < count; i++) {
+ unsigned long mfn, pfn;
+
+ /* Do not add to override if the map failed. */
+ if (map_ops[i].status)
+ continue;
+
+ if (map_ops[i].flags & GNTMAP_contains_pte) {
+ pte = (pte_t *) (mfn_to_virt(PFN_DOWN(map_ops[i].host_addr)...
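The excerpt stops mid-loop, but the lazy section opened above is closed at the end of the same function; presumably something like the following (reconstructed, not quoted):
	...
	if (lazy)
		arch_leave_lazy_mmu_mode();

	return ret;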
2007 Oct 01
2
[PATCH RFC] paravirt: cleanup lazy mode handling
...reemptible());
+
+ x86_write_percpu(paravirt_lazy_mode, PARAVIRT_LAZY_NONE);
+ (*ops->leave)();
+}
+
+static void flush_lazy(enum paravirt_lazy_mode mode, struct pv_lazy_ops *ops)
+{
+ if (x86_read_percpu(paravirt_lazy_mode) == mode) {
+ (*ops->leave)();
+ (*ops->enter)();
+ }
+}
+
+void arch_enter_lazy_mmu_mode(void)
+{
+ enter_lazy(PARAVIRT_LAZY_MMU, &pv_mmu_ops.lazy_mode);
+}
+
+void arch_leave_lazy_mmu_mode(void)
+{
+ leave_lazy(PARAVIRT_LAZY_MMU, &pv_mmu_ops.lazy_mode);
+}
+
+void arch_flush_lazy_mmu_mode(void)
+{
+ flush_lazy(PARAVIRT_LAZY_MMU, &pv_mmu_ops.lazy_mode);
+}
+
+void arch_ent...
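The matching enter_lazy() helper is cut off at the top of the excerpt; judging from the call sites and the leave_lazy()/flush_lazy() helpers shown, it presumably looks like this (a reconstruction, not quoted from the patch):
	static void enter_lazy(enum paravirt_lazy_mode mode, struct pv_lazy_ops *ops)
	{
		BUG_ON(x86_read_percpu(paravirt_lazy_mode) != PARAVIRT_LAZY_NONE);
		BUG_ON(preemptible());

		x86_write_percpu(paravirt_lazy_mode, mode);
		(*ops->enter)();
	}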
2007 Apr 18
15
[PATCH 0 of 13] Basic infrastructure patches for a paravirtualized kernel
[ REPOST: Apologies to anyone who has seen this before. It
didn't make it onto any of the lists it should have. -J ]
Hi Andrew,
This series of patches lays the basic groundwork for the
paravirtualized kernel patches coming later on. I think this lot is
ready for the rough-and-tumble world of the -mm tree.
For the most part, these patches do nothing or very little. The
patches should
2007 Oct 09
0
[PATCH RFC REPOST 2/2] paravirt: clean up lazy mode handling
..._ops.lazy_mode.leave);
}
static inline void arch_flush_lazy_cpu_mode(void)
{
- PVOP_VCALL1(pv_misc_ops.set_lazy_mode, PARAVIRT_LAZY_FLUSH);
+ if (unlikely(paravirt_get_lazy_mode() == PARAVIRT_LAZY_CPU)) {
+ arch_leave_lazy_cpu_mode();
+ arch_enter_lazy_cpu_mode();
+ }
}
#define __HAVE_ARCH_ENTER_LAZY_MMU_MODE
static inline void arch_enter_lazy_mmu_mode(void)
{
- PVOP_VCALL1(pv_misc_ops.set_lazy_mode, PARAVIRT_LAZY_MMU);
+ PVOP_VCALL0(pv_mmu_ops.lazy_mode.enter);
}
static inline void arch_leave_lazy_mmu_mode(void)
{
- PVOP_VCALL1(pv_misc_ops.set_lazy_mode, PARAVIRT_LAZY_NONE);
+ PVOP_VCALL0(pv_mm...
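The hunk is truncated, but by symmetry with arch_flush_lazy_cpu_mode() above, the MMU-side flush presumably ends up as (a reconstruction):
	static inline void arch_flush_lazy_mmu_mode(void)
	{
		if (unlikely(paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU)) {
			arch_leave_lazy_mmu_mode();
			arch_enter_lazy_mmu_mode();
		}
	}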
2006 Jul 25
18
[PATCH] turn off writable page tables
At OLS I gave a talk on some of the Xen scalability inhibitors, and one
of these was writable page tables. We went over why the feature does
not scale, but just as importantly, we found that the uniprocessor case
does not provide any advantage either. These tests were done on x86_64,
so I wanted to run the 1-way test on 32 bit to show the same problem.
So, I have run with writable PTs and
2023 Mar 17
0
[PATCH v2] x86/paravirt: convert simple paravirt functions to asm
...+DEFINE_PARAVIRT_ASM(pv_native_irq_enable, "sti", .noinstr.text);
+DEFINE_PARAVIRT_ASM(pv_native_read_cr2, "mov %cr2, %rax", .noinstr.text);
#endif
DEFINE_STATIC_KEY_TRUE(virt_spin_lock_key);
@@ -197,11 +197,6 @@ void paravirt_end_context_switch(struct task_struct *next)
arch_enter_lazy_mmu_mode();
}
-static noinstr unsigned long pv_native_read_cr2(void)
-{
- return native_read_cr2();
-}
-
static noinstr void pv_native_write_cr2(unsigned long val)
{
native_write_cr2(val);
@@ -222,16 +217,6 @@ noinstr void pv_native_wbinvd(void)
native_wbinvd();
}
-static noinstr void pv_native...
2020 Mar 16
0
[PATCH 2/4] mm: handle multiple owners of device private pages in migrate_vma
...are not migrated at all.
+ */
+ void *src_owner;
};
int migrate_vma_setup(struct migrate_vma *args);
diff --git a/mm/migrate.c b/mm/migrate.c
index b1092876e537..7605d2c23433 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -2241,7 +2241,7 @@ static int migrate_vma_collect_pmd(pmd_t *pmdp,
arch_enter_lazy_mmu_mode();
for (; addr < end; addr += PAGE_SIZE, ptep++) {
- unsigned long mpfn, pfn;
+ unsigned long mpfn = 0, pfn;
struct page *page;
swp_entry_t entry;
pte_t pte;
@@ -2255,8 +2255,6 @@ static int migrate_vma_collect_pmd(pmd_t *pmdp,
}
if (!pte_present(pte)) {
- mpfn = 0;
-...
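The new src_owner field is filled in by the driver that owns the device private pages before calling migrate_vma_setup(); a hypothetical caller might look like this (the driver-side names are illustrative):
	struct migrate_vma args = {
		.vma       = vma,
		.start     = start,
		.end       = end,
		.src       = src_pfns,
		.dst       = dst_pfns,
		/* only device private pages whose pgmap owner matches are migrated */
		.src_owner = my_drm_device,
	};

	if (migrate_vma_setup(&args))
		return -EINVAL;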
2007 Apr 18
0
[PATCH 2/6] Paravirt CPU hypercall batching mode
...LAZY_NONE 0
+#define PARAVIRT_LAZY_MMU 1
+#define PARAVIRT_LAZY_CPU 2
+
+#define __HAVE_ARCH_ENTER_LAZY_CPU_MODE
+#define arch_enter_lazy_cpu_mode() paravirt_ops.set_lazy_mode(PARAVIRT_LAZY_CPU)
+#define arch_leave_lazy_cpu_mode() paravirt_ops.set_lazy_mode(PARAVIRT_LAZY_NONE)
+
+#define __HAVE_ARCH_ENTER_LAZY_MMU_MODE
+#define arch_enter_lazy_mmu_mode() paravirt_ops.set_lazy_mode(PARAVIRT_LAZY_MMU)
+#define arch_leave_lazy_mmu_mode() paravirt_ops.set_lazy_mode(PARAVIRT_LAZY_NONE)
+
/* These all sit in the .parainstructions section to tell us what to patch. */
struct paravirt_patch {
u8 *instr; /* original...
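The CPU-mode pair is meant to bracket a run of privileged state updates so a paravirt back end can batch them into a single hypercall; a simplified, illustrative sketch of the kind of context-switch caller the patch targets:
	arch_enter_lazy_cpu_mode();		/* start queueing privileged updates */
	load_TLS(&next_p->thread, cpu);		/* several GDT descriptor writes */
	/* ... other per-task segment/descriptor updates ... */
	arch_leave_lazy_cpu_mode();		/* flush the queued updates in one batch */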
2007 Apr 18
2
[PATCH 2/5] Paravirt cpu batching.patch
...LAZY_NONE 0
+#define PARAVIRT_LAZY_MMU 1
+#define PARAVIRT_LAZY_CPU 2
+
+#define __HAVE_ARCH_ENTER_LAZY_CPU_MODE
+#define arch_enter_lazy_cpu_mode() paravirt_ops.set_lazy_mode(PARAVIRT_LAZY_CPU)
+#define arch_leave_lazy_cpu_mode() paravirt_ops.set_lazy_mode(PARAVIRT_LAZY_NONE)
+
+#define __HAVE_ARCH_ENTER_LAZY_MMU_MODE
+#define arch_enter_lazy_mmu_mode() paravirt_ops.set_lazy_mode(PARAVIRT_LAZY_MMU)
+#define arch_leave_lazy_mmu_mode() paravirt_ops.set_lazy_mode(PARAVIRT_LAZY_NONE)
+
/* These all sit in the .parainstructions section to tell us what to patch. */
struct paravirt_patch {
u8 *instr; /* original...
2023 Mar 08
3
[PATCH] x86/paravirt: convert simple paravirt functions to asm
...i", .text);
+DEFINE_PARAVIRT_ASM(pv_native_irq_enable, "sti", .text);
+DEFINE_PARAVIRT_ASM(pv_native_read_cr2, "mov %cr2, %rax", .text);
#endif
DEFINE_STATIC_KEY_TRUE(virt_spin_lock_key);
@@ -197,11 +197,6 @@ void paravirt_end_context_switch(struct task_struct *next)
arch_enter_lazy_mmu_mode();
}
-static noinstr unsigned long pv_native_read_cr2(void)
-{
- return native_read_cr2();
-}
-
static noinstr void pv_native_write_cr2(unsigned long val)
{
native_write_cr2(val);
@@ -222,16 +217,6 @@ noinstr void pv_native_wbinvd(void)
native_wbinvd();
}
-static noinstr void pv_native...