Displaying 20 results from an estimated 44 matches for "pte_write".
2016 Oct 26 (2) CVE-2016-5195 DirtyCOW : Critical Linux Kernel Flaw
...0000000 -0700
+++ linux-2.6.32-642.6.2.el6/mm/memory.c	2016-10-24 06:19:16.000000000 -0700
@@ -1177,6 +1177,24 @@ int zap_vma_ptes(struct vm_area_struct *
 }
 EXPORT_SYMBOL_GPL(zap_vma_ptes);
 
+static inline bool can_follow_write_pte(pte_t pte, struct page *page,
+					unsigned int flags)
+{
+	if (pte_write(pte))
+		return true;
+
+	/*
+	 * Make sure that we are really following CoWed page. We do not really
+	 * have to care about exclusiveness of the page because we only want
+	 * to ensure that once COWed page hasn't disappeared in the meantime
+	 * or it hasn't been merged to a KSM page.
+...
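For context, the mainline fix this backport follows gates FOLL_FORCE writes through a non-writable PTE on a completed, dirtied COW cycle. A minimal sketch of that upstream helper, quoted from memory rather than from this RHEL 6 patch (the backport above takes an extra struct page argument and additionally guards against the page having disappeared or been merged into KSM):

/*
 * FOLL_FORCE can write to even unwritable PTEs, but only after the
 * page has gone through a COW cycle and is marked dirty.
 */
static inline bool can_follow_write_pte(pte_t pte, unsigned int flags)
{
	return pte_write(pte) ||
		((flags & FOLL_FORCE) && (flags & FOLL_COW) && pte_dirty(pte));
}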
2019 Jul 29 (1) [PATCH 9/9] mm: remove the MIGRATE_PFN_WRITE flag
...se {
>   			if (is_zero_pfn(pfn)) {
>   				mpfn = MIGRATE_PFN_MIGRATE;
> @@ -2250,7 +2251,8 @@ static int migrate_vma_collect_pmd(pmd_t *pmdp,
>   			}
>   			page = vm_normal_page(migrate->vma, addr, pte);
>   			mpfn = migrate_pfn(pfn) | MIGRATE_PFN_MIGRATE;
> -			mpfn |= pte_write(pte) ? MIGRATE_PFN_WRITE : 0;
> +			if (pte_write(pte))
> +				writable = true;
>   		}
>   
>   		/* FIXME support THP */
> @@ -2284,8 +2286,7 @@ static int migrate_vma_collect_pmd(pmd_t *pmdp,
>   			ptep_get_and_clear(mm, addr, ptep);
>   
>   			/* Setup special migra...
2016 Oct 25 (5) CVE-2016-5195 DirtyCOW : Critical Linux Kernel Flaw
On Tue, 25 Oct 2016 10:06:12 +0200
Christian Anthon <anthon at rth.dk> wrote:
> What is the best approach on centos 6 to mitigate the problem until it is 
> officially patched? As far as I can tell CentOS 6 is vulnerable to 
> attacks using ptrace.
I can confirm that c6 is vulnerable, we're running a patched kernel
(local build) using a rhel6 adaptation of the upstream fix.
Ask
2019 Jul 29 (0) [PATCH 9/9] mm: remove the MIGRATE_PFN_WRITE flag
...PFN_WRITE;
+				writable = true;
 		} else {
 			if (is_zero_pfn(pfn)) {
 				mpfn = MIGRATE_PFN_MIGRATE;
@@ -2250,7 +2251,8 @@ static int migrate_vma_collect_pmd(pmd_t *pmdp,
 			}
 			page = vm_normal_page(migrate->vma, addr, pte);
 			mpfn = migrate_pfn(pfn) | MIGRATE_PFN_MIGRATE;
-			mpfn |= pte_write(pte) ? MIGRATE_PFN_WRITE : 0;
+			if (pte_write(pte))
+				writable = true;
 		}
 
 		/* FIXME support THP */
@@ -2284,8 +2286,7 @@ static int migrate_vma_collect_pmd(pmd_t *pmdp,
 			ptep_get_and_clear(mm, addr, ptep);
 
 			/* Setup special migration page table entry */
-			entry = make_migration...
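The excerpt is cut off where the collected flag is consumed; a hedged sketch of the intent (not the patch text) is that the local writable bit now decides whether the migration entry is created write-enabled, where the removed code tested mpfn & MIGRATE_PFN_WRITE. Roughly, within the same function's context:

/* sketch: writability gathered above selects a writable migration entry */
entry = make_migration_entry(page, writable);
set_pte_at(mm, addr, ptep, swp_entry_to_pte(entry));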
2019 Jul 29 (2) [PATCH 9/9] mm: remove the MIGRATE_PFN_WRITE flag
...} else {
>  			if (is_zero_pfn(pfn)) {
>  				mpfn = MIGRATE_PFN_MIGRATE;
> @@ -2250,7 +2251,8 @@ static int migrate_vma_collect_pmd(pmd_t *pmdp,
>  			}
>  			page = vm_normal_page(migrate->vma, addr, pte);
>  			mpfn = migrate_pfn(pfn) | MIGRATE_PFN_MIGRATE;
> -			mpfn |= pte_write(pte) ? MIGRATE_PFN_WRITE : 0;
> +			if (pte_write(pte))
> +				writable = true;
>  		}
>  
>  		/* FIXME support THP */
> @@ -2284,8 +2286,7 @@ static int migrate_vma_collect_pmd(pmd_t *pmdp,
>  			ptep_get_and_clear(mm, addr, ptep);
>  
>  			/* Setup special migration p...
2007 Jan 10 (1) [PATCH] linux/i386: allow CONFIG_HIGHPTE on i386 (take 2)
..._REPEAT|__GFP_ZERO, 0);
+#endif
 	if (pte) {
 		SetPageForeign(pte, pte_free);
 		set_page_count(pte, 1);
 	}
-#endif
 	return pte;
 }
 
 void pte_free(struct page *pte)
 {
-	unsigned long va = (unsigned long)__va(page_to_pfn(pte)<<PAGE_SHIFT);
+	unsigned long pfn = page_to_pfn(pte);
 
-	if (!pte_write(*virt_to_ptep(va)))
-		BUG_ON(HYPERVISOR_update_va_mapping(
-			va, pfn_pte(page_to_pfn(pte), PAGE_KERNEL), 0));
+	if (!PageHighMem(pte)) {
+		unsigned long va = (unsigned long)__va(pfn << PAGE_SHIFT);
+
+		if (!pte_write(*virt_to_ptep(va)))
+			BUG_ON(HYPERVISOR_update_va_mapping(
+...
2024 Jan 24 (1) [PATCH] mm: Remove double faults once write a device pfn
...>> vmf_insert_pfn_prot.
>>
>> Well, that doesn't answer my question why arm64 is double faulting in the
>> first place.
>>
>
>
> Eh.
>
> On arm64, when userspace mmap()s with PROT_WRITE and MAP_SHARED, the
> vma->vm_page_prot has both PTE_RDONLY and PTE_WRITE set within
> PAGE_SHARED_EXEC (see the arm64 protection_map).
>
> When the userspace virtual address is written, the first fault happens and
> calls into the driver's .fault->ttm_bo_vm_fault_reserved->vmf_insert_pfn_prot->insert_pfn path.
> insert_pfn will establish the pte entry. However...
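A minimal sketch of the fault path being described, with hypothetical helper names (my_dev_fault()/my_dev_pfn() are illustrative, not from the mail): the PTE installed on the first fault inherits vma->vm_page_prot, which on arm64 can still be read-only for a PROT_WRITE|MAP_SHARED mapping, so the actual write then takes a second fault just to set the writable/dirty state.

static vm_fault_t my_dev_fault(struct vm_fault *vmf)	/* hypothetical */
{
	struct vm_area_struct *vma = vmf->vma;
	unsigned long pfn = my_dev_pfn(vmf);		/* hypothetical */

	/* first fault: installs the PTE with vma->vm_page_prot, which may
	 * carry PTE_RDONLY on arm64 even for a shared writable mapping */
	return vmf_insert_pfn_prot(vma, vmf->address, pfn, vma->vm_page_prot);
}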
2008 May 23 (0) [PATCH] x86/paravirt: add pte_flags to just get pte flags
...37 @@
  */
 static inline int pte_dirty(pte_t pte)
 {
-	return pte_val(pte) & _PAGE_DIRTY;
+	return pte_flags(pte) & _PAGE_DIRTY;
 }
 
 static inline int pte_young(pte_t pte)
 {
-	return pte_val(pte) & _PAGE_ACCESSED;
+	return pte_flags(pte) & _PAGE_ACCESSED;
 }
 
 static inline int pte_write(pte_t pte)
 {
-	return pte_val(pte) & _PAGE_RW;
+	return pte_flags(pte) & _PAGE_RW;
 }
 
 static inline int pte_file(pte_t pte)
 {
-	return pte_val(pte) & _PAGE_FILE;
+	return pte_flags(pte) & _PAGE_FILE;
 }
 
 static inline int pte_huge(pte_t pte)
 {
-	return pte_val(pte) & _PA...
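The motivation, as a hedged note rather than the patch's own changelog: under paravirt, pte_val() may have to translate the frame number stored in the PTE (notably on Xen), while the flag bits need no translation, so a flags-only accessor can stay cheap. A native-side sketch, with PTE_FLAGS_MASK standing in for whatever flag mask that tree used:

/* sketch: return only the flag bits, skipping any pfn/mfn translation */
static inline pteval_t pte_flags(pte_t pte)
{
	return native_pte_val(pte) & PTE_FLAGS_MASK;
}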
2008 Mar 20 (0) [RFC/PATCH 02/15] preparation: host memory management changes for s390 kvm
...+		rcp_set_bits(ptep, _PAGE_RCP_GC);
+	if (skey & _PAGE_REFERENCED)
+		rcp_set_bits(ptep, _PAGE_RCP_GR);
+	if (rcp_test_and_clear_bits(ptep, _PAGE_RCP_HC))
+		SetPageDirty(page);
+	if (rcp_test_and_clear_bits(ptep, _PAGE_RCP_HR))
+		SetPageReferenced(page);
+#endif
+}
+
 /*
  * query functions pte_write/pte_dirty/pte_young only work if
  * pte_present() is true. Undefined behaviour if not..
@@ -599,6 +668,8 @@ static inline void pmd_clear(pmd_t *pmd)
 
 static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
 {
+	if (mm->context.pgstes)
+		ptep_rcp_copy(ptep);
 	pte_...
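The quoted comment states the precondition for these query helpers; a minimal, generic sketch of the safe calling pattern it implies (not s390-specific code):

/* only query write/dirty/young state once the pte is known to be present */
static bool pte_is_writable(pte_t pte)
{
	if (!pte_present(pte))
		return false;	/* flag queries would be undefined here */
	return pte_write(pte);
}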
2020 Mar 20 (0) [PATCH 2/2] mm/thp: Rename pmd_mknotpresent() as pmd_mknotvalid()
..._pte(pmd)))
 #define pmd_mkhuge(pmd)		pte_pmd(pte_mkhuge(pmd_pte(pmd)))
-#define pmd_mknotpresent(pmd)	pte_pmd(pte_mknotpresent(pmd_pte(pmd)))
+#define pmd_mknotvalid(pmd)	pte_pmd(pte_mknotpresent(pmd_pte(pmd)))
 #define pmd_mkclean(pmd)	pte_pmd(pte_mkclean(pmd_pte(pmd)))
 
 #define pmd_write(pmd)		pte_write(pmd_pte(pmd))
diff --git a/arch/arm/include/asm/pgtable-3level.h b/arch/arm/include/asm/pgtable-3level.h
index ad55ab068dbf..2943cdf2828b 100644
--- a/arch/arm/include/asm/pgtable-3level.h
+++ b/arch/arm/include/asm/pgtable-3level.h
@@ -241,7 +241,7 @@ PMD_BIT_FUNC(mkyoung,   |= PMD_SECT_AF);
 #def...
2020 Apr 22 (1) [PATCH V2 0/2] mm/thp: Rename pmd_mknotpresent() as pmd_mkinvalid()
This series renames pmd_mknotpresent() as pmd_mkinvalid(). Before that it
drops an existing pmd_mknotpresent() definition from the powerpc platform,
which was never required as it defines its pmdp_invalidate() through subscribing
__HAVE_ARCH_PMDP_INVALIDATE. This does not create any functional change.
This rename was suggested by Catalin during a previous discussion while we
were trying to change
2007 Apr 18 (0) [patch 6/9] Guest page hinting: writable page table entries.
...age_discard(struct page *page);
@@ -84,4 +90,21 @@ static inline void page_make_volatile(st
 		__page_make_volatile(page, offset);
 }
 
+static inline void page_check_writable(struct page *page, pte_t pte)
+{
+	extern void __page_check_writable(struct page *, pte_t);
+	if (!page_host_discards() || !pte_write(pte) ||
+	    test_bit(PG_writable, &page->flags))
+		return;
+	__page_check_writable(page, pte);
+}
+
+static inline void page_reset_writable(struct page *page)
+{
+	extern void __page_reset_writable(struct page *);
+	if (!page_host_discards() || !test_bit(PG_writable, &page->flags))...
2019 Jun 26 (0) [PATCH 04/25] mm: remove MEMORY_DEVICE_PUBLIC support
...ff --git a/mm/migrate.c b/mm/migrate.c
index f2ecc2855a12..149c692d5f9b 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -246,8 +246,6 @@ static bool remove_migration_pte(struct page *page, struct vm_area_struct *vma,
 			if (is_device_private_page(new)) {
 				entry = make_device_private_entry(new, pte_write(pte));
 				pte = swp_entry_to_pte(entry);
-			} else if (is_device_public_page(new)) {
-				pte = pte_mkdevmap(pte);
 			}
 		}
 
@@ -381,7 +379,6 @@ static int expected_page_refs(struct address_space *mapping, struct page *page)
 	 * ZONE_DEVICE pages.
 	 */
 	expected_count += is_device_private_...
2020 Apr 22 (0) [PATCH hmm 5/5] mm/hmm: remove the customizable pfn format from hmm_range_fault
...range->dev_private_owner;
 }
 
-static inline uint64_t pte_to_hmm_pfn_flags(struct hmm_range *range, pte_t pte)
+static inline unsigned long pte_to_hmm_pfn_flags(struct hmm_range *range,
+						 pte_t pte)
 {
 	if (pte_none(pte) || !pte_present(pte) || pte_protnone(pte))
 		return 0;
-	return pte_write(pte) ? range->flags[HMM_PFN_VALID] |
-				range->flags[HMM_PFN_WRITE] :
-				range->flags[HMM_PFN_VALID];
+	return pte_write(pte) ? (HMM_PFN_VALID | HMM_PFN_WRITE) : HMM_PFN_VALID;
 }
 
 static int hmm_vma_handle_pte(struct mm_walk *walk, unsigned long addr,
 			      unsigned long end, pm...
2020 May 01 (0) [PATCH hmm v2 5/5] mm/hmm: remove the customizable pfn format from hmm_range_fault
...range->dev_private_owner;
 }
 
-static inline uint64_t pte_to_hmm_pfn_flags(struct hmm_range *range, pte_t pte)
+static inline unsigned long pte_to_hmm_pfn_flags(struct hmm_range *range,
+						 pte_t pte)
 {
 	if (pte_none(pte) || !pte_present(pte) || pte_protnone(pte))
 		return 0;
-	return pte_write(pte) ? range->flags[HMM_PFN_VALID] |
-				range->flags[HMM_PFN_WRITE] :
-				range->flags[HMM_PFN_VALID];
+	return pte_write(pte) ? (HMM_PFN_VALID | HMM_PFN_WRITE) : HMM_PFN_VALID;
 }
 
 static int hmm_vma_handle_pte(struct mm_walk *walk, unsigned long addr,
 			      unsigned long end, pm...
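With the per-range flags[] table gone, a hmm_range_fault() caller tests the fixed HMM_PFN_* bits directly. A rough sketch, assuming the hmm_pfns[] output array this same series converts struct hmm_range to (an assumption about the surrounding patches, not shown in this excerpt):

unsigned long p = range->hmm_pfns[i];

if ((p & HMM_PFN_VALID) && (p & HMM_PFN_WRITE))
	/* the device may create a writable mapping for this page */;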
2020 Mar 20 (4) [PATCH 0/2] mm/thp: Rename pmd_mknotpresent() as pmd_mknotvalid()
This series renames pmd_mknotpresent() as pmd_mknotvalid(). Before that it
drops an existing pmd_mknotpresent() definition from the powerpc platform,
which was never required as it defines its pmdp_invalidate() through subscribing
__HAVE_ARCH_PMDP_INVALIDATE. This does not create any functional change.
This rename was suggested by Catalin during a previous discussion while we
were trying to
2020 Apr 22 (1) [PATCH hmm 5/5] mm/hmm: remove the customizable pfn format from hmm_range_fault
...> -static inline uint64_t pte_to_hmm_pfn_flags(struct hmm_range *range, pte_t pte)
> +static inline unsigned long pte_to_hmm_pfn_flags(struct hmm_range *range,
> +						 pte_t pte)
>  {
>  	if (pte_none(pte) || !pte_present(pte) || pte_protnone(pte))
>  		return 0;
> -	return pte_write(pte) ? range->flags[HMM_PFN_VALID] |
> -				range->flags[HMM_PFN_WRITE] :
> -				range->flags[HMM_PFN_VALID];
> +	return pte_write(pte) ? (HMM_PFN_VALID | HMM_PFN_WRITE) : HMM_PFN_VALID;
>  }
>  
>  static int hmm_vma_handle_pte(struct mm_walk *walk, unsigned long addr,
>...
2020 Jun 19 (0) [PATCH 13/16] mm: support THP migration to device private memory
...t, end, -1, walk);
 	}
 
-	if (unlikely(pmd_bad(*pmdp)))
+	if (unlikely(pmd_bad(pmd) || pmd_devmap(pmd)))
 		return migrate_vma_collect_skip(start, end, walk);
 
 	ptep = pte_offset_map_lock(mm, pmdp, addr, &ptl);
@@ -2310,8 +2352,7 @@ static int migrate_vma_collect_pmd(pmd_t *pmdp,
 			mpfn |= pte_write(pte) ? MIGRATE_PFN_WRITE : 0;
 		}
 
-		/* FIXME support THP */
-		if (!page || !page->mapping || PageTransCompound(page)) {
+		if (!page || !page->mapping) {
 			mpfn = 0;
 			goto next;
 		}
@@ -2420,14 +2461,6 @@ static bool migrate_vma_check_page(struct page *page)
 	 */
 	int extra = 1;...