Search results for "__vma"
2007 Apr 18
0
[PATCH 7/9] 00mma remove set pte atomic.patch
...======================
--- a/include/asm-generic/pgtable.h
+++ b/include/asm-generic/pgtable.h
@@ -13,19 +13,11 @@
* Note: the old pte is known to not be writable, so we don't need to
* worry about dirty bits etc getting lost.
*/
-#ifndef __HAVE_ARCH_SET_PTE_ATOMIC
#define ptep_establish(__vma, __address, __ptep, __entry) \
do { \
set_pte_at((__vma)->vm_mm, (__address), __ptep, __entry); \
flush_tlb_page(__vma, __address); \
} while (0)
-#else /* __HAVE_ARCH_SET_PTE_ATOMIC */
-#define ptep_establish(__vma, __address, __ptep, __entry) \
-do { \
- set_pte_...
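For context, a minimal sketch of what the generic ptep_establish() kept by this patch expands to, written out as a function for readability (illustrative only; set_pte_at() and flush_tlb_page() are the standard kernel primitives the macro calls):

#include <linux/mm.h>
#include <asm/tlbflush.h>

/* Sketch: the generic (non-atomic) ptep_establish() path as a function. */
static inline void sketch_ptep_establish(struct vm_area_struct *vma,
					 unsigned long address,
					 pte_t *ptep, pte_t entry)
{
	/* Install the new PTE. Per the comment in the patch, the old
	 * PTE is known not to be writable, so dirty-bit loss is not a
	 * concern on this path. */
	set_pte_at(vma->vm_mm, address, ptep, entry);
	/* Drop any stale TLB entry for this user page. */
	flush_tlb_page(vma, address);
}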
2007 Apr 18
1
[RFC, PATCH 19/24] i386 Vmi mmu changes
...pte_low);
}
-
+
static inline int ptep_test_and_clear_young(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep)
{
if (!pte_young(*ptep))
@@ -281,6 +289,15 @@ static inline void ptep_set_wrprotect(st
clear_bit(_PAGE_BIT_RW, &ptep->pte_low);
}
+#define ptep_set_access_flags(__vma, __address, __ptep, __entry, __dirty) \
+ do { \
+ if (__dirty) { \
+ (__ptep)->pte_low = (__entry).pte_low; \
+ flush_tlb_page(__vma, __address); \
+ } \
+ } while (0)
+#endif /* !__HAVE_SUBARCH_PTE_WRITE_FUNCTIONS */
+
/*
* clone_pgd_range(pgd_t *dst, p...
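The macro added above can be read as the following function; a hedged sketch assuming the 32-bit pte_t layout with a pte_low member (the accessed, dirty, and permission bits live in the low word, so no atomic 64-bit store is needed, and the TLB is flushed only when the entry actually changed):

/* Sketch of the i386 ptep_set_access_flags() macro above (illustrative). */
static inline void sketch_ptep_set_access_flags(struct vm_area_struct *vma,
						unsigned long address,
						pte_t *ptep, pte_t entry,
						int dirty)
{
	if (dirty) {
		/* Only the low word changes (accessed/dirty/RW bits). */
		ptep->pte_low = entry.pte_low;
		/* The CPU may still have the old entry cached. */
		flush_tlb_page(vma, address);
	}
}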
2020 Jul 20
1
[PATCH v2 3/5] mm/notifier: add migration invalidation type
...)
> @@ -513,6 +519,7 @@ static inline void mmu_notifier_range_init(struct mmu_notifier_range *range,
> range->start = start;
> range->end = end;
> range->flags = flags;
> + range->migrate_pgmap_owner = NULL;
> }
>
> #define ptep_clear_flush_young_notify(__vma, __address, __ptep) \
> diff --git a/mm/migrate.c b/mm/migrate.c
> index 2bbc5c4c672e..9b3dcb81be5f 100644
> --- a/mm/migrate.c
> +++ b/mm/migrate.c
> @@ -2391,8 +2391,14 @@ static void migrate_vma_collect(struct migrate_vma *migrate)
> {
> struct mmu_notifier_range range;
>
> - mmu_notifi...
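To see what the new field buys, here is a hedged sketch of a device driver's interval-notifier callback using it (my_device, its pgmap_owner token, and my_device_flush_tlb() are hypothetical; MMU_NOTIFY_MIGRATE and range->migrate_pgmap_owner are the additions this series proposes):

#include <linux/mmu_notifier.h>

/* Hypothetical driver bits: my_device, dev->pgmap_owner, my_device_flush_tlb(). */
static bool my_invalidate(struct mmu_interval_notifier *mni,
			  const struct mmu_notifier_range *range,
			  unsigned long cur_seq)
{
	struct my_device *dev = container_of(mni, struct my_device, notifier);

	/* This invalidation was triggered by our own migrate_vma_setup();
	 * the migration path already takes care of these pages, so skip
	 * the expensive device TLB invalidation. */
	if (range->event == MMU_NOTIFY_MIGRATE &&
	    range->migrate_pgmap_owner == dev->pgmap_owner)
		return true;

	mmu_interval_set_seq(mni, cur_seq);
	my_device_flush_tlb(dev, range->start, range->end);
	return true;
}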
2007 Apr 18
3
[PATCH] abstract out bits of ldt.c
Chris Wright wrote:
>* Zachary Amsden (zach@vmware.com) wrote:
>
>
>>Does Xen assume page aligned descriptor tables? I assume from this
>>
>>
>
>Yes.
>
>
>
>>patch and snippets I have gathered from others, that is a yes, and other
>>things here imply that DT pages are not shadowed. If so, Xen itself
>>must have live segments
2020 Jul 13
0
[PATCH v2 3/5] mm/notifier: add migration invalidation type
...nt mm_has_notifiers(struct mm_struct *mm)
@@ -513,6 +519,7 @@ static inline void mmu_notifier_range_init(struct mmu_notifier_range *range,
range->start = start;
range->end = end;
range->flags = flags;
+ range->migrate_pgmap_owner = NULL;
}
#define ptep_clear_flush_young_notify(__vma, __address, __ptep) \
diff --git a/mm/migrate.c b/mm/migrate.c
index 2bbc5c4c672e..9b3dcb81be5f 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -2391,8 +2391,14 @@ static void migrate_vma_collect(struct migrate_vma *migrate)
{
struct mmu_notifier_range range;
- mmu_notifier_range_init(&ra...
2020 Jul 06
0
[PATCH 3/5] mm/notifier: add migration invalidation type
...static inline int mm_has_notifiers(struct mm_struct *mm)
@@ -513,6 +519,7 @@ static inline void mmu_notifier_range_init(struct mmu_notifier_range *range,
range->start = start;
range->end = end;
range->flags = flags;
+ range->data = NULL;
}
#define ptep_clear_flush_young_notify(__vma, __address, __ptep) \
diff --git a/mm/migrate.c b/mm/migrate.c
index 2bbc5c4c672e..62270e6727b0 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -2391,8 +2391,14 @@ static void migrate_vma_collect(struct migrate_vma *migrate)
{
struct mmu_notifier_range range;
- mmu_notifier_range_init(&ra...
2007 Oct 31
5
[PATCH 0/7] (Re-)introducing pvops for x86_64 - Real pvops work part
Hey folks,
This is the part-of-pvops-implementation-that-is-not-exactly-a-merge. Neat,
huh? This is the majority of the work.
The first patch in the series does not really belong here. It was already
sent to lkml separately before, but I'm including it again, for a very
simple reason: try to test the paravirt patches without it, and you'll fail
miserably ;-) (and it was not yet
2020 Jul 21
0
[PATCH v3 3/5] mm/notifier: add migration invalidation type
...nt mm_has_notifiers(struct mm_struct *mm)
@@ -513,6 +519,7 @@ static inline void mmu_notifier_range_init(struct mmu_notifier_range *range,
range->start = start;
range->end = end;
range->flags = flags;
+ range->migrate_pgmap_owner = NULL;
}
#define ptep_clear_flush_young_notify(__vma, __address, __ptep) \
diff --git a/mm/migrate.c b/mm/migrate.c
index e3ea68e3a08b..96e1f41a991e 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -2392,8 +2392,14 @@ static void migrate_vma_collect(struct migrate_vma *migrate)
{
struct mmu_notifier_range range;
- mmu_notifier_range_init(&ra...
2020 Jul 23
0
[PATCH v4 3/6] mm/notifier: add migration invalidation type
...nt mm_has_notifiers(struct mm_struct *mm)
@@ -513,6 +519,7 @@ static inline void mmu_notifier_range_init(struct mmu_notifier_range *range,
range->start = start;
range->end = end;
range->flags = flags;
+ range->migrate_pgmap_owner = NULL;
}
#define ptep_clear_flush_young_notify(__vma, __address, __ptep) \
diff --git a/mm/migrate.c b/mm/migrate.c
index e3ea68e3a08b..96e1f41a991e 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -2392,8 +2392,14 @@ static void migrate_vma_collect(struct migrate_vma *migrate)
{
struct mmu_notifier_range range;
- mmu_notifier_range_init(&ra...
2007 Aug 10
9
[PATCH 0/25 -v2] paravirt_ops for x86_64, second round
Here is a slightly updated version of the paravirt_ops patch.
If your comments and criticism were welcome before, they are even more so now!
There are some issues that are _not_ addressed in this revision, and here
are the reasons:
* split debugreg into multiple functions, suggested by Andi:
- jsfg and I agree that introducing more pvops (especially 14!) is
not worthwhile. So, although we do
2007 Nov 09
11
[PATCH 0/24] paravirt_ops for unified x86 - that's me again!
Hey folks,
Here's a new spin of the pvops64 patch series.
We didn't get that many comments the last time,
so it should probably be almost ready to go in. Heya!
From the last version, the most notable changes are:
* consolidation of system.h, merging jeremy's comments about ordering
concerns
* consolidation of smp functions that goes through smp_ops. They're sharing
2007 Aug 15
13
[PATCH 0/25][V3] pvops_64 last round (hopefully)
This is hopefully the last iteration of the pvops64 patch.
From the last version, we have only one change, in include/asm-x86_64/processor.h: there was still one survivor in raw asm.
Also, git screwed me up for some reason, and the 25th patch was missing the new files, paravirt.{c,h} (although I do remember having git-add'ed them, but who knows...).
Andrew, could you please push it
2020 Jul 13
9
[PATCH v2 0/5] mm/migrate: avoid device private invalidations
The goal for this series is to avoid device private memory TLB
invalidations when migrating a range of addresses from system
memory to device private memory and some of those pages have already
been migrated. The approach taken is to introduce a new mmu notifier
invalidation event type and use that in the device driver to skip
invalidation callbacks from migrate_vma_setup(). The device driver is
2020 Jul 06
8
[PATCH 0/5] mm/migrate: avoid device private invalidations
The goal for this series is to avoid device private memory TLB
invalidations when migrating a range of addresses from system
memory to device private memory and some of those pages have already
been migrated. The approach taken is to introduce a new mmu notifier
invalidation event type and use that in the device driver to skip
invalidation callbacks from migrate_vma_setup(). The device driver is
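To make the flow the cover letters describe concrete, a hedged sketch of a driver migrating a range of system memory to device memory (the pgmap_owner and flags fields follow what this series adds to struct migrate_vma; my_pgmap_owner and the device copy step are hypothetical placeholders):

#include <linux/migrate.h>

static int my_migrate_to_device(struct vm_area_struct *vma,
				unsigned long start, unsigned long end,
				unsigned long *src, unsigned long *dst)
{
	struct migrate_vma args = {
		.vma		= vma,
		.start		= start,
		.end		= end,
		.src		= src,
		.dst		= dst,
		/* Token identifying this driver's pages; hypothetical. */
		.pgmap_owner	= my_pgmap_owner,
		.flags		= MIGRATE_VMA_SELECT_SYSTEM,
	};
	int ret;

	/* Fires MMU_NOTIFY_MIGRATE with migrate_pgmap_owner set, which
	 * lets the driver's own notifier callback skip the invalidation. */
	ret = migrate_vma_setup(&args);
	if (ret)
		return ret;

	/* ... allocate device pages and copy the args.cpages collected
	 * pages to device memory here ... */

	migrate_vma_pages(&args);
	migrate_vma_finalize(&args);
	return 0;
}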