Displaying 20 results from an estimated 165 matches for "kunmap_atom".
2008 Oct 27
0
[PATCH 4/4] linux/i386: utilize hypervisor highmem handling helpers
...assist))
+ && PageHighMem(page)) {
+ struct mmuext_op meo;
+
+ meo.cmd = MMUEXT_CLEAR_PAGE;
+ meo.arg1.mfn = pfn_to_mfn(page_to_pfn(page));
+ if (HYPERVISOR_mmuext_op(&meo, 1, NULL, DOMID_SELF) == 0)
+ return;
+ }
+
+ kaddr = kmap_atomic(page, KM_USER0);
+ clear_page(kaddr);
+ kunmap_atomic(kaddr, KM_USER0);
+}
+
+void copy_highpage(struct page *to, struct page *from)
+{
+ void *vfrom, *vto;
+
+ if (likely(xen_feature(XENFEAT_highmem_assist))
+ && (PageHighMem(from) || PageHighMem(to))) {
+ unsigned long from_pfn = page_to_pfn(from);
+ unsigned long to_pfn = page_to_pf...
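For context, when the hypervisor assist is unavailable these helpers fall back to the classic slot-based kmap_atomic() API, as the clear_page() path above shows. A minimal sketch of the corresponding fallback copy with that era's KM_USER* slots (illustrative, not the patch's exact code):

#include <linux/highmem.h>

/* Sketch of a generic fallback: copy a possibly-highmem page using two
 * kmap slots, then tear the temporary mappings down again. */
static void copy_highpage_fallback(struct page *to, struct page *from)
{
        void *vfrom, *vto;

        vfrom = kmap_atomic(from, KM_USER0);    /* map source page */
        vto = kmap_atomic(to, KM_USER1);        /* map destination page */
        copy_page(vto, vfrom);
        kunmap_atomic(vto, KM_USER1);           /* release in reverse order */
        kunmap_atomic(vfrom, KM_USER0);
}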
2020 Nov 03
0
[patch V3 22/37] highmem: High implementation details and document API
...page, prot);
+}
+
+static inline void *kmap_atomic(struct page *page)
+{
+ return kmap_atomic_prot(page, kmap_prot);
+}
+
+static inline void *kmap_atomic_pfn(unsigned long pfn)
+{
+ preempt_disable();
+ pagefault_disable();
+ return __kmap_local_pfn_prot(pfn, kmap_prot);
+}
+
+static inline void __kunmap_atomic(void *addr)
+{
+ kunmap_local_indexed(addr);
+ pagefault_enable();
+ preempt_enable();
+}
+
+unsigned int __nr_free_highpages(void);
+extern atomic_long_t _totalhigh_pages;
+
+static inline unsigned int nr_free_highpages(void)
+{
+ return __nr_free_highpages();
+}
+
+static inline unsigned long t...
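Callers keep the same contract as before: between kmap_atomic() and kunmap_atomic(), pagefaults and preemption are disabled, so the mapped section must not sleep and should stay short. A minimal caller-side sketch under those assumptions (the helper name is illustrative, not from the patch):

#include <linux/highmem.h>
#include <linux/string.h>

/* Illustrative helper: zero the tail of a possibly-highmem page. */
static void zero_page_tail(struct page *page, unsigned int offset)
{
        void *kaddr = kmap_atomic(page);        /* disables pagefaults and preemption */

        memset(kaddr + offset, 0, PAGE_SIZE - offset);
        kunmap_atomic(kaddr);                   /* re-enables both */
}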
2007 Apr 18
1
how set_pte_at()'s vaddr and ptep args relate
..."addr"
and "ptep" args are. I presume that in general the ptep points to the
pte entry which corresponds to the vaddr, but is this necessarily the case?
For example, it is valid to pass a non-highmem page to kmap_atomic(), which
will simply return a direct pointer to the page.
kunmap_atomic() takes this address, as well as the kmap slot index, and
ends up calling:
set_pte_at(&init_mm, lowmem_vaddr, kmap_ptep, 0);
ie, the vaddr and the ptep bear no relationship to each other. Is this
a bug in kunmap_atomic (it shouldn't try to clear the pte for lowmem
addresses), or...
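For reference, the behaviour being questioned goes away if kunmap_atomic() returns early for direct-mapped addresses instead of touching any pte. A rough sketch of that shape, using the names of the era's i386 code (illustrative, not the actual implementation):

void kunmap_atomic(void *kvaddr, enum km_type type)
{
        unsigned long vaddr = (unsigned long)kvaddr & PAGE_MASK;

        if (vaddr < FIXADDR_START) {
                /* Lowmem: kmap_atomic() returned the direct mapping, so
                 * there is no fixmap pte to tear down. */
                pagefault_enable();
                return;
        }

        /* Highmem: clear this CPU's fixmap slot and flush its TLB entry. */
        kpte_clear_flush(kmap_pte - (type + KM_TYPE_NR * smp_processor_id()), vaddr);
        pagefault_enable();
}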
2019 Mar 12
0
[RFC PATCH V2 0/5] vhost: accelerate metadata access through vmap()
...sure there must be workarounds elsewhere in the other arch code
> otherwise things like this, which appear all over drivers/, wouldn't
> work:
>
> drivers/scsi/isci/request.c:1430
>
> kaddr = kmap_atomic(page);
> memcpy(kaddr + sg->offset, src_addr, copy_len);
> kunmap_atomic(kaddr);
>
Are you sure "page" is a userland page with an alias address?
sg->page_link = (unsigned long)virt_to_page(addr);
page_link seems to point to kernel memory.
I found an apparent solution, like parisc's, in the arm 32-bit code:
void __kunmap_atomic(void *kvaddr)
{
unsigned long...
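The arm 32-bit code being referred to flushes the data cache for aliasing (VIVT) caches before the temporary mapping goes away. A hypothetical helper illustrating just that idea (cache_is_vivt() and __cpuc_flush_dcache_area() are real ARM primitives; the helper itself is not kernel code):

#include <linux/highmem.h>
#include <asm/cachetype.h>
#include <asm/cacheflush.h>

/* Hypothetical: on a virtually-indexed cache, flush the kernel alias of a
 * temporarily mapped page before unmapping it, so dirty lines are not lost. */
static void kunmap_flushing(void *kaddr)
{
        if (cache_is_vivt())
                __cpuc_flush_dcache_area(kaddr, PAGE_SIZE);
        kunmap_atomic(kaddr);
}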
2019 Mar 11
4
[RFC PATCH V2 0/5] vhost: accelerate metadata access through vmap()
From: "Michael S. Tsirkin" <mst at redhat.com>
Date: Mon, 11 Mar 2019 09:59:28 -0400
> On Mon, Mar 11, 2019 at 03:13:17PM +0800, Jason Wang wrote:
>>
>> On 2019/3/8 10:12, Christoph Hellwig wrote:
>> > On Wed, Mar 06, 2019 at 02:18:07AM -0500, Jason Wang wrote:
>> > > This series tries to access virtqueue metadata through kernel virtual
2020 Nov 03
0
[patch V3 32/37] drm/vmgfx: Replace kmap_atomic()
...-----
1 file changed, 12 insertions(+), 18 deletions(-)
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_blit.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_blit.c
@@ -375,12 +375,12 @@ static int vmw_bo_cpu_blit_line(struct v
copy_size = min_t(u32, copy_size, PAGE_SIZE - src_page_offset);
if (unmap_src) {
- kunmap_atomic(d->src_addr);
+ kunmap_local(d->src_addr);
d->src_addr = NULL;
}
if (unmap_dst) {
- kunmap_atomic(d->dst_addr);
+ kunmap_local(d->dst_addr);
d->dst_addr = NULL;
}
@@ -388,12 +388,8 @@ static int vmw_bo_cpu_blit_line(struct v
if (WARN_ON_ONCE(dst_pa...
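kunmap_local() keeps one rule from the atomic API: local mappings are per-thread and stack-like, so when several pages are mapped at once they must be released in the reverse order of mapping. Unlike kmap_atomic(), though, the mapped section may take pagefaults and be preempted. An illustrative copy helper (not the driver's code):

#include <linux/highmem.h>
#include <linux/string.h>

/* Illustrative only: copy one page to another via kmap_local_page(). */
static void copy_page_local(struct page *dst, struct page *src)
{
        void *vsrc = kmap_local_page(src);
        void *vdst = kmap_local_page(dst);

        memcpy(vdst, vsrc, PAGE_SIZE);

        kunmap_local(vdst);     /* mapped last, released first */
        kunmap_local(vsrc);
}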
2020 Nov 03
0
[patch V3 06/37] highmem: Provide generic variant of kmap_atomic*
...G_KMAP_LOCAL
+void *__kmap_local_pfn_prot(unsigned long pfn, pgprot_t prot);
+void *__kmap_local_page_prot(struct page *page, pgprot_t prot);
+void kunmap_local_indexed(void *vaddr);
+#endif
+
#ifdef CONFIG_HIGHMEM
-extern void *kmap_atomic_high_prot(struct page *page, pgprot_t prot);
-extern void kunmap_atomic_high(void *kvaddr);
#include <asm/highmem.h>
#ifndef ARCH_HAS_KMAP_FLUSH_TLB
@@ -81,6 +88,11 @@ static inline void kunmap(struct page *p
* be used in IRQ contexts, so in some (very limited) cases we need
* it.
*/
+
+#ifndef CONFIG_KMAP_LOCAL
+void *kmap_atomic_high_prot(struct pag...
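For orientation, on !CONFIG_HIGHMEM configurations all of this collapses to the direct map while keeping the calling convention; roughly what the stubs look like (a sketch, not the patch text):

static inline void *kmap_atomic(struct page *page)
{
        preempt_disable();
        pagefault_disable();
        return page_address(page);      /* every page is in the direct map */
}

static inline void __kunmap_atomic(void *addr)
{
        pagefault_enable();
        preempt_enable();
}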
2023 Mar 21
1
[PATCH v3 3/8] vringh: replace kmap_atomic() with kmap_local_page()
...t/vringh.c
@@ -1220,10 +1220,10 @@ static inline int getu16_iotlb(const struct vringh *vrh,
if (ret < 0)
return ret;
- kaddr = kmap_atomic(iov.bv_page);
+ kaddr = kmap_local_page(iov.bv_page);
from = kaddr + iov.bv_offset;
*val = vringh16_to_cpu(vrh, READ_ONCE(*(__virtio16 *)from));
- kunmap_atomic(kaddr);
+ kunmap_local(kaddr);
return 0;
}
@@ -1241,10 +1241,10 @@ static inline int putu16_iotlb(const struct vringh *vrh,
if (ret < 0)
return ret;
- kaddr = kmap_atomic(iov.bv_page);
+ kaddr = kmap_local_page(iov.bv_page);
to = kaddr + iov.bv_offset;
WRITE_ONCE(*(__virtio16 *...
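Presumably the replacement is a pure drop-in here because the code between map and unmap does not rely on the implicit pagefault/preempt disabling of kmap_atomic(). A self-contained sketch of the same access pattern with the local API (illustrative, simplified from the excerpt):

#include <linux/highmem.h>
#include <linux/bvec.h>
#include <linux/compiler.h>

/* Illustrative: read a little-endian 16-bit value at an offset inside a
 * bio_vec page using a short-lived local mapping. */
static u16 read_le16_from_bvec(const struct bio_vec *iov)
{
        void *kaddr = kmap_local_page(iov->bv_page);
        __le16 raw = READ_ONCE(*(__le16 *)(kaddr + iov->bv_offset));

        kunmap_local(kaddr);
        return le16_to_cpu(raw);
}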
2020 Nov 03
0
[patch V3 29/37] ARM: mm: Replace kmap_atomic_pfn()
...49,9 +49,9 @@ static inline unsigned long l2_get_va(un
* we simply install a virtual mapping for it only for the
* TLB lookup to occur, hence no need to flush the untouched
* memory mapping afterwards (note: a cache flush may happen
- * in some circumstances depending on the path taken in kunmap_atomic).
+ * in some circumstances depending on the path taken in kunmap_local).
*/
- void *vaddr = kmap_atomic_pfn(paddr >> PAGE_SHIFT);
+ void *vaddr = kmap_local_pfn(paddr >> PAGE_SHIFT);
return (unsigned long)vaddr + (paddr & ~PAGE_MASK);
#else
return __phys_to_virt(paddr);...
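The matching unmap side is not in the excerpt; presumably it mirrors l2_get_va() along these lines (an illustrative sketch, not necessarily the patch's exact hunk):

static inline void l2_put_va(unsigned long vaddr)
{
#ifdef CONFIG_HIGHMEM
        /* Only highmem addresses were mapped via kmap_local_pfn() above;
         * kunmap_local() masks the low bits back to the page itself. */
        kunmap_local((void *)vaddr);
#endif
}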
2007 Apr 18
2
pte_offset_map + lazy mmu
Is pte_offset_map allowed to happen within lazy mmu? I presume not,
because you definitely don't want the mapping pte update to be deferred.
Or, more specifically, is kunmap_atomic ever allowed within lazy mmu?
I'm looking at kpte_clear_flush; I've already got a patch which turns
this into a pv_op, along with a Xen implementation. But I think it's
probably an excess pv_op for a relatively minor corner case. It seems
to me that it would be better to define kpte_cle...
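For reference, kpte_clear_flush() at the time was just a pte clear plus a single-address TLB flush, which is why making it a dedicated pv_op looks heavyweight for such a corner case. Roughly, from memory, the i386 macro of that era:

#define kpte_clear_flush(ptep, vaddr)           \
do {                                            \
        pte_clear(&init_mm, (vaddr), (ptep));   \
        __flush_tlb_one((vaddr));               \
} while (0)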
2016 Mar 11
0
[PATCH v1 06/19] zsmalloc: clean up many BUG_ON
...class *class,
unsigned long f_objidx, f_offset;
void *vaddr;
- BUG_ON(!obj);
-
obj &= ~OBJ_ALLOCATED_TAG;
obj_to_location(obj, &f_page, &f_objidx);
first_page = get_first_page(f_page);
@@ -1546,7 +1540,6 @@ static void zs_object_copy(unsigned long dst, unsigned long src,
kunmap_atomic(d_addr);
kunmap_atomic(s_addr);
s_page = get_next_page(s_page);
- BUG_ON(!s_page);
s_addr = kmap_atomic(s_page);
d_addr = kmap_atomic(d_page);
s_size = class->size - written;
@@ -1556,7 +1549,6 @@ static void zs_object_copy(unsigned long dst, unsigned long src,
if (d_o...
2019 Mar 12
9
[RFC PATCH V2 0/5] vhost: accelerate metadata access through vmap()
...        int bit = nr + (log % PAGE_SIZE) * 8;
>         int r;
>
>         r = get_user_pages_fast(log, 1, 1, &page);
>         if (r < 0)
>                 return r;
>         BUG_ON(r != 1);
>         base = kmap_atomic(page);
>         set_bit(bit, base);
>         kunmap_atomic(base);
>         set_page_dirty_lock(page);
>         put_page(page);
>         return 0;
> }
>
> Thanks
I think you are right. The correct fix though is to re-implement
it using asm and handling the pagefault, not gup.
Three atomic ops per bit is way too expensive.
--
MST
2018 Feb 07
2
[PATCH v27 3/4] mm/page_poison: expose page_poisoning_enabled to kernel modules
...> + *
> + * Return true if page poisoning is enabled, or false if not.
> + */
> static void poison_page(struct page *page)
> {
> void *addr = kmap_atomic(page);
> @@ -37,6 +42,7 @@ static void poison_page(struct page *page)
> memset(addr, PAGE_POISON, PAGE_SIZE);
> kunmap_atomic(addr);
> }
> +EXPORT_SYMBOL_GPL(page_poisoning_enabled);
>
> static void poison_pages(struct page *page, int n)
> {
Looks like both the comment and the export are in the wrong place.
I'm a bit concerned that callers also in fact poke at the
PAGE_POISON - exporting that se...
2020 Nov 03
45
[patch V3 00/37] mm/highmem: Preemptible variant of kmap_atomic & friends
Following up to the discussion in:
https://lore.kernel.org/r/20200914204209.256266093@linutronix.de
and the second version of this:
https://lore.kernel.org/r/20201029221806.189523375@linutronix.de
this series provides a preemptible variant of kmap_atomic & related
interfaces.
This is achieved by:
- Removing the RT dependency from migrate_disable/enable()
- Consolidating all