Displaying 20 results from an estimated 31 matches for "is_device_private_entry".
2020 Jun 22
2
[PATCH 13/16] mm: support THP migration to device private memory
...>>> } else if (thp_migration_supported()) {
>>> swp_entry_t entry;
>>>
>>> - VM_BUG_ON(!is_pmd_migration_entry(orig_pmd));
>>> entry = pmd_to_swp_entry(orig_pmd);
>>> - page = pfn_to_page(swp_offset(entry));
>>> + if (is_device_private_entry(entry)) {
>>> + page = device_private_entry_to_page(entry);
>>> + is_anon = PageAnon(page);
>>> + page_remove_rmap(page, true);
>>> + VM_BUG_ON_PAGE(page_mapcount(page) < 0, page);
>>> + VM_BUG_ON_PAGE(!PageHead(page), page);
>>> ...
2020 Jun 22
2
[PATCH 13/16] mm: support THP migration to device private memory
...()) {
>>>>> swp_entry_t entry;
>>>>>
>>>>> - VM_BUG_ON(!is_pmd_migration_entry(orig_pmd));
>>>>> entry = pmd_to_swp_entry(orig_pmd);
>>>>> - page = pfn_to_page(swp_offset(entry));
>>>>> + if (is_device_private_entry(entry)) {
>>>>> + page = device_private_entry_to_page(entry);
>>>>> + is_anon = PageAnon(page);
>>>>> + page_remove_rmap(page, true);
>>>>> + VM_BUG_ON_PAGE(page_mapcount(page) < 0, page);
>>>>> + VM_BUG_ON...
2020 Jun 21
2
[PATCH 13/16] mm: support THP migration to device private memory
...0, page);
> VM_BUG_ON_PAGE(!PageHead(page), page);
> } else if (thp_migration_supported()) {
> swp_entry_t entry;
>
> - VM_BUG_ON(!is_pmd_migration_entry(orig_pmd));
> entry = pmd_to_swp_entry(orig_pmd);
> - page = pfn_to_page(swp_offset(entry));
> + if (is_device_private_entry(entry)) {
> + page = device_private_entry_to_page(entry);
> + is_anon = PageAnon(page);
> + page_remove_rmap(page, true);
> + VM_BUG_ON_PAGE(page_mapcount(page) < 0, page);
> + VM_BUG_ON_PAGE(!PageHead(page), page);
> + put_page(page);
Why do you hide this co...
2020 Jun 22
0
[PATCH 13/16] mm: support THP migration to device private memory
...(thp_migration_supported()) {
>>>> swp_entry_t entry;
>>>>
>>>> - VM_BUG_ON(!is_pmd_migration_entry(orig_pmd));
>>>> entry = pmd_to_swp_entry(orig_pmd);
>>>> - page = pfn_to_page(swp_offset(entry));
>>>> + if (is_device_private_entry(entry)) {
>>>> + page = device_private_entry_to_page(entry);
>>>> + is_anon = PageAnon(page);
>>>> + page_remove_rmap(page, true);
>>>> + VM_BUG_ON_PAGE(page_mapcount(page) < 0, page);
>>>> + VM_BUG_ON_PAGE(!PageHead(page...
2020 Jun 22
0
[PATCH 13/16] mm: support THP migration to device private memory
...>>>> - VM_BUG_ON(!is_pmd_migration_entry(orig_pmd));
> >>>>> entry = pmd_to_swp_entry(orig_pmd);
> >>>>> - page = pfn_to_page(swp_offset(entry));
> >>>>> + if (is_device_private_entry(entry)) {
> >>>>> + page = device_private_entry_to_page(entry);
> >>>>> + is_anon = PageAnon(page);
> >>>>> + page_remove_rmap(page, true);
> >>>>> +...
2020 Mar 21
1
[PATCH 4/4] mm: check the device private page owner in hmm_range_fault
...age
> > cannot be removed from the vma without holding mmap_sem in write or
> > something?
>
> I don't think there is any specific protection. Let me see if we
> can throw in a get_dev_pagemap here
The page tables are RCU protected, right? Could we do something like
if (is_device_private_entry(entry)) {
	rcu_read_lock()
	if (READ_ONCE(*ptep) != pte)
		return -EBUSY;
	hmm_is_device_private_entry(range, entry)
	rcu_read_unlock()
}
?
Then pgmap needs a synchronize_rcu before the struct pages are
destroyed (possibly gup_fast already requires this?)
I've got some...
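Filled out, the sketch above would look something like the following. The argument lists are taken from the hmm_is_device_private_entry() helper quoted elsewhere in these threads, pte_same() replaces the raw != (pte_t is not integer-comparable on all architectures), and the rcu_read_unlock() is ordered before the early return; this is an illustration of the proposal, not code from the thread:

	swp_entry_t entry = pte_to_swp_entry(pte);
	bool owned = false;

	if (is_device_private_entry(entry)) {
		/* Hold RCU so the pgmap, and with it the device's
		 * struct pages, cannot be torn down under us. */
		rcu_read_lock();
		/* The PTE may have changed since it was loaded; recheck. */
		if (!pte_same(READ_ONCE(*ptep), pte)) {
			rcu_read_unlock();
			return -EBUSY;
		}
		owned = hmm_is_device_private_entry(range, entry);
		rcu_read_unlock();
	}

The matching requirement falls on the teardown side: memunmap_pages() (or whatever ultimately frees the device's struct pages) would need a synchronize_rcu() first, which is the point raised above about gup_fast possibly requiring this already.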
2020 Mar 16
0
[PATCH 4/4] mm: check the device private page owner in hmm_range_fault
...git a/mm/hmm.c b/mm/hmm.c
index cfad65f6a67b..b75b3750e03d 100644
--- a/mm/hmm.c
+++ b/mm/hmm.c
@@ -216,6 +216,14 @@ int hmm_vma_handle_pmd(struct mm_walk *walk, unsigned long addr,
unsigned long end, uint64_t *pfns, pmd_t pmd);
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
+static inline bool hmm_is_device_private_entry(struct hmm_range *range,
+ swp_entry_t entry)
+{
+ return is_device_private_entry(entry) &&
+ device_private_entry_to_page(entry)->pgmap->owner ==
+ range->dev_private_owner;
+}
+
static inline uint64_t pte_to_hmm_pfn_flags(struct hmm_range *range, pte_t pte)
{
if (pte_none(...
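For context, here is roughly how such a helper slots into hmm_vma_handle_pte(); the flag plumbing is reconstructed from the surrounding hmm code of that era and is illustrative, not quoted from the patch:

	swp_entry_t entry = pte_to_swp_entry(pte);

	/*
	 * Only report device private pages whose pgmap owner matches
	 * what the caller passed in range->dev_private_owner;
	 * anything else is treated like ordinary swap.
	 */
	if (hmm_is_device_private_entry(range, entry)) {
		*pfn = hmm_device_entry_from_pfn(range, swp_offset(entry)) |
		       range->flags[HMM_PFN_VALID];
		if (is_write_device_private_entry(entry))
			*pfn |= range->flags[HMM_PFN_WRITE];
		return 0;
	}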
2020 Jun 22
2
[PATCH 13/16] mm: support THP migration to device private memory
...> - VM_BUG_ON(!is_pmd_migration_entry(orig_pmd));
> > >>>>> entry = pmd_to_swp_entry(orig_pmd);
> > >>>>> - page = pfn_to_page(swp_offset(entry));
> > >>>>> + if (is_device_private_entry(entry)) {
> > >>>>> + page = device_private_entry_to_page(entry);
> > >>>>> + is_anon = PageAnon(page);
> > >>>>> + page_remove_rmap(page, true);
> > >>...
2020 Mar 20
2
[PATCH 4/4] mm: check the device private page owner in hmm_range_fault
...hmm.c
> index cfad65f6a67b..b75b3750e03d 100644
> +++ b/mm/hmm.c
> @@ -216,6 +216,14 @@ int hmm_vma_handle_pmd(struct mm_walk *walk, unsigned long addr,
> unsigned long end, uint64_t *pfns, pmd_t pmd);
> #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
>
> +static inline bool hmm_is_device_private_entry(struct hmm_range *range,
> + swp_entry_t entry)
> +{
> + return is_device_private_entry(entry) &&
> + device_private_entry_to_page(entry)->pgmap->owner ==
> + range->dev_private_owner;
> +}
Thinking about this some more, does the locking work out here?
hmm_ra...
2020 Jun 22
0
[PATCH 13/16] mm: support THP migration to device private memory
...GE(!PageHead(page), page);
>> } else if (thp_migration_supported()) {
>> swp_entry_t entry;
>>
>> - VM_BUG_ON(!is_pmd_migration_entry(orig_pmd));
>> entry = pmd_to_swp_entry(orig_pmd);
>> - page = pfn_to_page(swp_offset(entry));
>> + if (is_device_private_entry(entry)) {
>> + page = device_private_entry_to_page(entry);
>> + is_anon = PageAnon(page);
>> + page_remove_rmap(page, true);
>> + VM_BUG_ON_PAGE(page_mapcount(page) < 0, page);
>> + VM_BUG_ON_PAGE(!PageHead(page), page);
>> + put_page(page);...
2020 Mar 17
2
[PATCH 3/4] mm: simplify device private page handling in hmm_range_fault
On Tue, Mar 17, 2020 at 09:15:36AM -0300, Jason Gunthorpe wrote:
> > Getting rid of HMM_PFN_DEVICE_PRIVATE seems reasonable to me since a driver can
> > look at the struct page but what if a driver needs to fault in a page from
> > another device's private memory? Should it call handle_mm_fault()?
>
> Isn't that what this series basically does?
>
> The
2020 Mar 16
14
ensure device private pages have an owner v2
When acting on device private mappings a driver needs to know if the
device (or other entity in case of kvmppc) actually owns this private
mapping. This series adds an owner field and converts the migrate_vma
code over to check it. I looked into doing the same for
hmm_range_fault, but as far as I can tell that code has never been
wired up to actually work for device private memory, so instead of
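The conversion itself boils down to one check in the migrate_vma collection walk. A minimal sketch, assuming the series names the new fields owner (on struct dev_pagemap) and src_owner (on struct migrate_vma); the names are taken on trust from this series and may differ in later revisions:

	/* In migrate_vma_collect_pmd(): only collect device private
	 * pages that the caller actually owns. */
	if (is_device_private_entry(entry)) {
		page = device_private_entry_to_page(entry);
		if (page->pgmap->owner != migrate->src_owner)
			goto next;	/* another driver's memory, skip */
	}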
2020 Jun 19
0
[PATCH 13/16] mm: support THP migration to device private memory
..._BUG_ON_PAGE(page_mapcount(page) < 0, page);
VM_BUG_ON_PAGE(!PageHead(page), page);
} else if (thp_migration_supported()) {
swp_entry_t entry;
- VM_BUG_ON(!is_pmd_migration_entry(orig_pmd));
entry = pmd_to_swp_entry(orig_pmd);
- page = pfn_to_page(swp_offset(entry));
+ if (is_device_private_entry(entry)) {
+ page = device_private_entry_to_page(entry);
+ is_anon = PageAnon(page);
+ page_remove_rmap(page, true);
+ VM_BUG_ON_PAGE(page_mapcount(page) < 0, page);
+ VM_BUG_ON_PAGE(!PageHead(page), page);
+ put_page(page);
+ } else {
+ VM_BUG_ON(!is_pmd_migration_entry(or...
2020 Nov 06
0
[PATCH v3 3/6] mm: support THP migration to device private memory
..._BUG_ON_PAGE(page_mapcount(page) < 0, page);
VM_BUG_ON_PAGE(!PageHead(page), page);
} else if (thp_migration_supported()) {
swp_entry_t entry;
- VM_BUG_ON(!is_pmd_migration_entry(orig_pmd));
entry = pmd_to_swp_entry(orig_pmd);
- page = pfn_to_page(swp_offset(entry));
+ if (is_device_private_entry(entry)) {
+ page = device_private_entry_to_page(entry);
+ is_anon = PageAnon(page);
+ page_remove_rmap(page, true);
+ VM_BUG_ON_PAGE(page_mapcount(page) < 0, page);
+ VM_BUG_ON_PAGE(!PageHead(page), page);
+ put_page(page);
+ } else {
+ VM_BUG_ON(!is_pmd_migration_entry(or...
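Pieced together from the fragments quoted across these threads, the reworked zap-time branch reads roughly as follows. Every excerpt cuts off inside the else arm, so its tail is completed here by assumption, presumably the page lookup that the patch removed from the common path:

	} else if (thp_migration_supported()) {
		swp_entry_t entry;

		entry = pmd_to_swp_entry(orig_pmd);
		if (is_device_private_entry(entry)) {
			/* Device private THP: tear down the mapping and
			 * drop the reference the entry held on the page. */
			page = device_private_entry_to_page(entry);
			is_anon = PageAnon(page);
			page_remove_rmap(page, true);
			VM_BUG_ON_PAGE(page_mapcount(page) < 0, page);
			VM_BUG_ON_PAGE(!PageHead(page), page);
			put_page(page);
		} else {
			VM_BUG_ON(!is_pmd_migration_entry(orig_pmd));
			page = pfn_to_page(swp_offset(entry));	/* assumed tail */
		}
	}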
2020 Mar 17
0
[PATCH 3/4] mm: simplify device private page handling in hmm_range_fault
...m/hmm.c b/mm/hmm.c
index b75b3750e03d..2884a3d11a1f 100644
--- a/mm/hmm.c
+++ b/mm/hmm.c
@@ -276,7 +276,7 @@ static int hmm_vma_handle_pte(struct mm_walk *walk, unsigned long addr,
if (!fault && !write_fault)
return 0;
- if (!non_swap_entry(entry))
+ if (!non_swap_entry(entry) || is_device_private_entry(entry))
goto fault;
if (is_migration_entry(entry)) {
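Read in context, the one-liner routes device private entries down the same fault path as ordinary swap entries rather than decoding them inline; shown with the quoted context and an explanatory comment added (the comment is not part of the patch):

	if (!fault && !write_fault)
		return 0;

	/*
	 * Device private entries that still need a fault at this point
	 * are handled like plain swap entries; only migration entries
	 * keep their special-case handling below.
	 */
	if (!non_swap_entry(entry) || is_device_private_entry(entry))
		goto fault;

	if (is_migration_entry(entry)) {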
2020 Mar 16
0
[PATCH 3/4] mm: simplify device private page handling in hmm_range_fault
...walk, unsigned long addr,
swp_entry_t entry = pte_to_swp_entry(pte);
/*
- * This is a special swap entry, ignore migration, use
- * device and report anything else as error.
+ * Never fault in device private pages, but just report
+ * the PFN even if not present.
*/
if (is_device_private_entry(entry)) {
- cpu_flags = range->flags[HMM_PFN_VALID] |
- range->flags[HMM_PFN_DEVICE_PRIVATE];
- cpu_flags |= is_write_device_private_entry(entry) ?
- range->flags[HMM_PFN_WRITE] : 0;
- hmm_pte_need_fault(hmm_vma_walk, orig_pfn, cpu_flags,
- &fault, &write_fault);...
2020 Mar 16
4
ensure device private pages have an owner
When acting on device private mappings a driver needs to know if the
device (or other entity in case of kvmppc) actually owns this private
mapping. This series adds an owner field and converts the migrate_vma
code over to check it. I looked into doing the same for
hmm_range_fault, but as far as I can tell that code has never been
wired up to actually work for device private memory, so instead of
2020 Mar 17
3
[PATCH 3/4] mm: simplify device private page handling in hmm_range_fault
...ndex b75b3750e03d..2884a3d11a1f 100644
> +++ b/mm/hmm.c
> @@ -276,7 +276,7 @@ static int hmm_vma_handle_pte(struct mm_walk *walk, unsigned long addr,
> if (!fault && !write_fault)
> return 0;
>
> - if (!non_swap_entry(entry))
> + if (!non_swap_entry(entry) || is_device_private_entry(entry))
> goto fault;
Yes, OK, makes sense.
I've been using v7 of Ralph's tester and it is working well - it has
DEVICE_PRIVATE support so I think it can test this flow too. Ralph, are
you able?
This hunk seems trivial enough to me, can we include it now?
Thanks,
Jason
2020 Mar 16
0
[PATCH 2/2] mm: remove device private page support from hmm_range_fault
...ID]);
@@ -259,25 +250,6 @@ static int hmm_vma_handle_pte(struct mm_walk *walk, unsigned long addr,
if (!pte_present(pte)) {
swp_entry_t entry = pte_to_swp_entry(pte);
- /*
- * This is a special swap entry, ignore migration, use
- * device and report anything else as error.
- */
- if (is_device_private_entry(entry)) {
- cpu_flags = range->flags[HMM_PFN_VALID] |
- range->flags[HMM_PFN_DEVICE_PRIVATE];
- cpu_flags |= is_write_device_private_entry(entry) ?
- range->flags[HMM_PFN_WRITE] : 0;
- hmm_pte_need_fault(hmm_vma_walk, orig_pfn, cpu_flags,
- &fault, &write_fault);...
2019 Jun 13
0
[PATCH 10/22] memremap: add a migrate callback to struct dev_pagemap_ops
...evmem->resource;
diff --git a/mm/memory.c b/mm/memory.c
index ddf20bd0c317..cbf3cb598436 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -2782,13 +2782,8 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
migration_entry_wait(vma->vm_mm, vmf->pmd,
vmf->address);
} else if (is_device_private_entry(entry)) {
- /*
- * For un-addressable device memory we call the pgmap
- * fault handler callback. The callback must migrate
- * the page back to some CPU accessible page.
- */
- ret = device_private_entry_fault(vma, vmf->address, entry,
- vmf->flags, vmf->pmd);
+ vm...
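The excerpt cuts off mid-replacement. For reference, the shape this callback conversion ultimately takes in mainline, where the hook settles on the name migrate_to_ram, is below; this is the mainline form, not necessarily the exact text of this patch:

	} else if (is_device_private_entry(entry)) {
		/*
		 * For un-addressable device memory, the driver's
		 * migrate_to_ram() callback must migrate the page back
		 * to some CPU-accessible page.
		 */
		vmf->page = device_private_entry_to_page(entry);
		ret = vmf->page->pgmap->ops->migrate_to_ram(vmf);
	}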