search for: pagefault_disable

Displaying 20 results from an estimated 33 matches for "pagefault_disable".

2016 Dec 19
2
[PATCH v7 08/11] x86, kvm/x86.c: support vcpu preempted check
.../x86.c @@ -2844,7 +2844,17 @@ static void kvm_steal_time_set_preempted(struct kvm_vcpu *vcpu) void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu) { + /* + * Disable page faults because we're in atomic context here. + * kvm_write_guest_offset_cached() would call might_fault() + * that relies on pagefault_disable() to tell if there's a + * bug. NOTE: the write to guest memory may not go through if + * during postcopy live migration or if there's heavy guest + * paging. + */ + pagefault_disable(); kvm_steal_time_set_preempted(vcpu); + pagefault_enable(); kvm_x86_ops->vcpu_put(vcpu); kvm_...
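The bracketing in this hunk is a general pattern rather than anything KVM-specific: a write to guest or user memory issued from a region that must not sleep is wrapped in pagefault_disable()/pagefault_enable(), trading a possible sleep for a possibly failed write. A minimal sketch of that pattern (not the KVM code itself; __copy_to_user_inatomic() stands in for whatever non-faulting copy the real call chain bottoms out in):

#include <linux/types.h>
#include <linux/uaccess.h>

/*
 * Sketch only: best-effort write to user/guest memory from a context
 * that must not sleep.  With page faults disabled, a non-resident page
 * makes the copy fail instead of blocking, so the caller has to
 * tolerate the write not going through.
 */
static int write_u32_nofault(u32 __user *uptr, u32 val)
{
	unsigned long uncopied;

	pagefault_disable();
	uncopied = __copy_to_user_inatomic(uptr, &val, sizeof(val));
	pagefault_enable();

	return uncopied ? -EFAULT : 0;
}

Whether kvm_write_guest_offset_cached() itself is safe to call under this bracketing is exactly what the follow-up review below questions.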
2016 Dec 19
0
[PATCH v7 08/11] x86, kvm/x86.c: support vcpu preempted check
...after > preempt_disable)). __copy_to_user in kvm_write_guest_offset_cached > schedules and locks up the host. > yes, you are right! :) we are aware of these problems. I am going to introduce something like kvm_write_guest_XXX_atomic and use it instead of kvm_write_guest_offset_cached. Within pagefault_disable()/enable(), we cannot call __copy_to_user, I think. > kvm->srcu (or kvm->slots_lock) is also not taken and > kvm_write_guest_offset_cached needs to call kvm_memslots which > requires it. > let me check the details later. Thanks for pointing it out. > This I think is why postc...
2020 Nov 03
0
[patch V3 22/37] highmem: High implementation details and document API
...eturn; + kunmap_high(page); +} + +static inline struct page *kmap_to_page(void *addr) +{ + return __kmap_to_page(addr); +} + +static inline void kmap_flush_unused(void) +{ + __kmap_flush_unused(); +} + +static inline void *kmap_atomic_prot(struct page *page, pgprot_t prot) +{ + preempt_disable(); + pagefault_disable(); + return __kmap_local_page_prot(page, prot); +} + +static inline void *kmap_atomic(struct page *page) +{ + return kmap_atomic_prot(page, kmap_prot); +} + +static inline void *kmap_atomic_pfn(unsigned long pfn) +{ + preempt_disable(); + pagefault_disable(); + return __kmap_local_pfn_prot(pfn, kma...
2010 Dec 07
9
[PATCH] Btrfs: pwrite blocked when writing from the mmaped buffer of the same page
...m_user(loff_t pos, int num_pages, PAGE_CACHE_SIZE - offset, write_bytes); struct page *page = prepared_pages[pg]; again: - /* - * Copy data from userspace to the current page - * - * Disable pagefault to avoid recursive lock since the pages - * are already locked - */ - pagefault_disable(); - copied = iov_iter_copy_from_user_atomic(page, i, offset, count); - pagefault_enable(); + if (unlikely(iov_iter_fault_in_readable(i, count))) + return -EFAULT; + + /* Copy data from userspace to the current page */ + copied = iov_iter_copy_from_user(page, i, offset, count); /* Flush...
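For reference, the canonical shape of this pattern in write paths is: pre-fault the source buffer while no destination page lock is held, then copy with page faults disabled so the copy can only come up short rather than recurse into the fault path, and retry on a short copy. A sketch of that shape (generic, not the btrfs code; the iov_iter helpers match kernels of that era and are spelled differently today):

#include <linux/uio.h>
#include <linux/uaccess.h>
#include <linux/pagemap.h>

/*
 * Sketch of the fault-in / atomic-copy pattern.  Step 1 must run while
 * no destination page lock is held, otherwise the pre-fault itself can
 * deadlock against that lock; step 2 runs with faults disabled and can
 * only return a short count.
 */
static size_t copy_chunk(struct page *page, struct iov_iter *i,
			 unsigned long offset, size_t bytes)
{
	size_t copied;

	/* Step 1: make the source pages resident. */
	if (iov_iter_fault_in_readable(i, bytes))
		return 0;		/* caller maps this to -EFAULT */

	/* Step 2: copy without taking faults. */
	pagefault_disable();
	copied = iov_iter_copy_from_user_atomic(page, i, offset, bytes);
	pagefault_enable();

	return copied;			/* short copy => caller re-faults and retries */
}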
2020 Nov 03
0
[patch V3 06/37] highmem: Provide generic variant of kmap_atomic*
...prot(page, prot); } -#define kmap_atomic(page) kmap_atomic_prot(page, kmap_prot) + +static inline void __kunmap_atomic(void *vaddr) +{ + kunmap_atomic_high(vaddr); +} +#else /* !CONFIG_KMAP_LOCAL */ + +static inline void *kmap_atomic_prot(struct page *page, pgprot_t prot) +{ + preempt_disable(); + pagefault_disable(); + return __kmap_local_page_prot(page, prot); +} + +static inline void *kmap_atomic_pfn(unsigned long pfn) +{ + preempt_disable(); + pagefault_disable(); + return __kmap_local_pfn_prot(pfn, kmap_prot); +} + +static inline void __kunmap_atomic(void *addr) +{ + kunmap_local_indexed(addr); +} + +#en...
2020 Nov 03
0
[patch V3 19/37] mm/highmem: Remove the old kmap_atomic cruft
...IRQ contexts, so in some (very limited) cases we need * it. */ - -#ifndef CONFIG_KMAP_LOCAL -void *kmap_atomic_high_prot(struct page *page, pgprot_t prot); -void kunmap_atomic_high(void *kvaddr); - static inline void *kmap_atomic_prot(struct page *page, pgprot_t prot) { preempt_disable(); pagefault_disable(); - if (!PageHighMem(page)) - return page_address(page); - return kmap_atomic_high_prot(page, prot); -} - -static inline void __kunmap_atomic(void *vaddr) -{ - kunmap_atomic_high(vaddr); + return __kmap_local_page_prot(page, prot); } -#else /* !CONFIG_KMAP_LOCAL */ -static inline void *kmap_at...
2020 Nov 03
0
[patch V3 20/37] io-mapping: Cleanup atomic iomap
...ot(unsigned long pfn, pgprot_t prot) { /* * For non-PAT systems, translate non-WB request to UC- just in @@ -60,8 +60,6 @@ void __iomem *iomap_atomic_pfn_prot(unsi /* Filter out unsupported __PAGE_KERNEL* bits: */ pgprot_val(prot) &= __default_kernel_pte_mask; - preempt_disable(); - pagefault_disable(); return (void __force __iomem *)__kmap_local_pfn_prot(pfn, prot); } -EXPORT_SYMBOL_GPL(iomap_atomic_pfn_prot); +EXPORT_SYMBOL_GPL(__iomap_local_pfn_prot); --- a/include/linux/io-mapping.h +++ b/include/linux/io-mapping.h @@ -69,13 +69,17 @@ io_mapping_map_atomic_wc(struct io_mappi BUG_ON(o...
2023 Mar 21
1
[PATCH v3 3/8] vringh: replace kmap_atomic() with kmap_local_page()
...bles migration). The code within the mappings/un-mappings in getu16_iotlb() and putu16_iotlb() doesn't depend on the above-mentioned side effects of kmap_atomic(), so a mere replacement of the old API with the new one is all that is required (i.e., there is no need to explicitly add calls to pagefault_disable() and/or preempt_disable()). This commit reuses a "boiler plate" commit message from Fabio, who has already done this change in several places. Cc: "Fabio M. De Francesco" <fmdefrancesco at gmail.com> Signed-off-by: Stefano Garzarella <sgarzare at redhat.com> --- N...
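The conversion described here is mechanical when, as in this case, nothing between the map and the unmap relies on preemption or page faults being disabled. A sketch of the before/after shape for a small accessor (a generic illustration, not the vringh code):

#include <linux/types.h>
#include <linux/string.h>
#include <linux/highmem.h>

/* Old style: kmap_atomic() also disables preemption and page faults. */
static u16 read_u16_atomic(struct page *page, unsigned int offset)
{
	void *kaddr = kmap_atomic(page);
	u16 val;

	memcpy(&val, kaddr + offset, sizeof(val));
	kunmap_atomic(kaddr);
	return val;
}

/* New style: kmap_local_page() only creates a CPU-local mapping. */
static u16 read_u16_local(struct page *page, unsigned int offset)
{
	void *kaddr = kmap_local_page(page);
	u16 val;

	memcpy(&val, kaddr + offset, sizeof(val));
	kunmap_local(kaddr);
	return val;
}

If the mapped section did depend on those side effects, the conversion would instead need explicit preempt_disable() and/or pagefault_disable() around the kmap_local_page() window, which is what the generic kmap_atomic_prot() wrappers in the highmem series above do.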
2020 Nov 03
45
[patch V3 00/37] mm/highmem: Preemptible variant of kmap_atomic & friends
Following up on the discussion in: https://lore.kernel.org/r/20200914204209.256266093 at linutronix.de and the second version of this: https://lore.kernel.org/r/20201029221806.189523375 at linutronix.de this series provides a preemptible variant of kmap_atomic & related interfaces. This is achieved by: - Removing the RT dependency from migrate_disable/enable() - Consolidating all
2019 May 07
0
[PATCH RFC] vhost: don't use kmap() to log dirty pages
...t; nr; > + r = put_user(old_log, addr); > + if (r < 0) > + return r; And this just looks odd to me. Why do we need the futex call to replace a 0 value with 0? Why does it still duplicate the put_user? This doesn't look like actually working code to me. Also don't we need a pagefault_disable() around futex_atomic_cmpxchg_inatomic?
2020 Apr 04
0
[PATCH 1/6] amdgpu: a NULL ->mm does not mean a thread is a kthread
...pu_amdkfd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h index 13feb313e9b3..4db143c19dcc 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h @@ -190,7 +190,7 @@ uint8_t amdgpu_amdkfd_get_xgmi_hops_count(struct kgd_dev *dst, struct kgd_dev *s pagefault_disable(); \ if ((mmptr) == current->mm) { \ valid = !get_user((dst), (wptr)); \ - } else if (current->mm == NULL) { \ + } else if (current->flags & PF_KTHREAD) { \ use_mm(mmptr); \ valid = !get_user((dst), (wptr)); \ unuse_mm(mmptr); \ -- 2.25.1
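Rewritten as a plain function rather than the amdgpu-specific macro, the corrected logic looks roughly like the sketch below; read_wptr() is a hypothetical name, and use_mm()/unuse_mm() are the interfaces of that era (later renamed to kthread_use_mm()/kthread_unuse_mm()):

#include <linux/types.h>
#include <linux/sched.h>
#include <linux/mmu_context.h>
#include <linux/uaccess.h>

/*
 * Sketch: only a kernel thread (PF_KTHREAD) may temporarily adopt
 * another process's mm, so that is the condition to test; a NULL
 * current->mm alone does not prove the caller is a kthread.
 */
static bool read_wptr(struct mm_struct *mm, u32 __user *wptr, u32 *dst)
{
	bool valid = false;

	pagefault_disable();
	if (mm == current->mm) {
		/* Same address space: read directly. */
		valid = !get_user(*dst, wptr);
	} else if (current->flags & PF_KTHREAD) {
		/* Kernel thread: borrow the target mm for the access. */
		use_mm(mm);
		valid = !get_user(*dst, wptr);
		unuse_mm(mm);
	}
	pagefault_enable();

	return valid;
}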
2023 Mar 16
0
[PATCH v2 3/8] vringh: replace kmap_atomic() with kmap_local_page()
...e mappings/un-mappings in getu16_iotlb() and >> putu16_iotlb() don't depend on the above-mentioned side effects of >> kmap_atomic(), so that mere replacements of the old API with the new one >> is all that is required (i.e., there is no need to explicitly add calls >> to pagefault_disable() and/or preempt_disable()). > >It seems that my commit message is quite clear and complete and therefore has >already been reused by others who have somehow given me credit. > >I would really appreciate it being mentioned here that you are reusing a >"boiler plate" comm...
2019 May 07
4
[PATCH RFC] vhost: don't use kmap() to log dirty pages
Vhost logs dirty pages directly to a userspace bitmap through GUP and kmap_atomic(), since the kernel doesn't have a set_bit_to_user() helper. This will cause issues for architectures with virtually tagged caches. The way to fix this is to keep using the userspace virtual address. Fortunately, futex has a cmpxchg-to-userspace-memory helper, futex_atomic_cmpxchg_inatomic(). So switch to using it to exchange the
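The helper in question performs a cmpxchg directly on a 32-bit word in userspace memory; the review excerpt above also asks whether it needs a pagefault_disable() guard. A rough sketch of a set_bit_to_user()-style operation built on it, under the assumption that the call is bracketed by pagefault_disable()/pagefault_enable() and that the caller faults the page in and retries on -EFAULT:

#include <linux/types.h>
#include <linux/uaccess.h>
#include <asm/futex.h>

/*
 * Sketch: set bit 'nr' (0..31) in a userspace 32-bit word using the
 * futex cmpxchg helper.  Returns 0 on success, -EFAULT if the page is
 * not resident (caller faults it in and retries), -EAGAIN if the word
 * changed between the read and the cmpxchg.
 */
static int set_bit_to_user_sketch(int nr, u32 __user *uaddr)
{
	u32 old, new, cur;
	int ret;

	if (get_user(old, uaddr))
		return -EFAULT;
	new = old | (1U << nr);

	pagefault_disable();
	ret = futex_atomic_cmpxchg_inatomic(&cur, uaddr, old, new);
	pagefault_enable();

	if (ret)
		return ret;			/* typically -EFAULT */
	return cur == old ? 0 : -EAGAIN;	/* raced with another writer */
}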
2020 Nov 03
0
[patch V3 13/37] mips/mm/highmem: Switch to generic kmap atomic
...kmap_atomic_idx_pop(); -} -EXPORT_SYMBOL(kunmap_atomic_high); - -/* - * This is the same as kmap_atomic() but can map memory that doesn't - * have a struct page associated with it. - */ -void *kmap_atomic_pfn(unsigned long pfn) -{ - unsigned long vaddr; - int idx, type; - - preempt_disable(); - pagefault_disable(); - - type = kmap_atomic_idx_push(); - idx = type + KM_TYPE_NR*smp_processor_id(); - vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx); - set_pte(kmap_pte-idx, pfn_pte(pfn, PAGE_KERNEL)); - flush_tlb_one(vaddr); - - return (void*) vaddr; -} - -void __init kmap_init(void) -{ - unsigned long kmap_vstart;...
2020 Nov 03
0
[patch V3 10/37] ARM: highmem: Switch to generic kmap atomic
...ess was obtained through kmap_high_get() */ - kunmap_high(pte_page(pkmap_page_table[PKMAP_NR(vaddr)])); - } -} -EXPORT_SYMBOL(kunmap_atomic_high); - -void *kmap_atomic_pfn(unsigned long pfn) -{ - unsigned long vaddr; - int idx, type; - struct page *page = pfn_to_page(pfn); - - preempt_disable(); - pagefault_disable(); - if (!PageHighMem(page)) - return page_address(page); - - type = kmap_atomic_idx_push(); - idx = FIX_KMAP_BEGIN + type + KM_TYPE_NR * smp_processor_id(); - vaddr = __fix_to_virt(idx); -#ifdef CONFIG_DEBUG_HIGHMEM - BUG_ON(!pte_none(get_fixmap_pte(vaddr))); -#endif - set_fixmap_pte(idx, pfn_pte...
2016 Nov 02
13
[PATCH v7 00/11] implement vcpu preempted check
change from v6: fix typos and remove unnecessary comments. change from v5: split x86/kvm patch into guest/host part. introduce kvm_write_guest_offset_cached. fix some typos. rebase patch onto 4.9.2. change from v4: split x86 kvm vcpu preempted check into two patches. add documentation patch. add x86 vcpu preempted check patch under xen. add s390 vcpu preempted check patch. change from v3: