Christoph Hellwig
2021-Dec-09 06:38 UTC
[PATCH 1/5] uio: remove copy_from_iter_flushcache() and copy_mc_to_iter()
These two wrappers are never used.
Signed-off-by: Christoph Hellwig <hch at lst.de>
---
 drivers/nvdimm/pmem.c |  4 ++--
 include/linux/uio.h   | 20 +-------------------
 2 files changed, 3 insertions(+), 21 deletions(-)
diff --git a/drivers/nvdimm/pmem.c b/drivers/nvdimm/pmem.c
index 4190c8c46ca88..8294f1c701baa 100644
--- a/drivers/nvdimm/pmem.c
+++ b/drivers/nvdimm/pmem.c
@@ -302,8 +302,8 @@ static long pmem_dax_direct_access(struct dax_device *dax_dev,
 }
 
 /*
- * Use the 'no check' versions of copy_from_iter_flushcache() and
- * copy_mc_to_iter() to bypass HARDENED_USERCOPY overhead. Bounds
+ * Use the 'no check' versions of _copy_from_iter_flushcache() and
+ * _copy_mc_to_iter() to bypass HARDENED_USERCOPY overhead. Bounds
  * checking, both file offset and device offset, is handled by
  * dax_iomap_actor()
  */
diff --git a/include/linux/uio.h b/include/linux/uio.h
index 6350354f97e90..494d552c1d663 100644
--- a/include/linux/uio.h
+++ b/include/linux/uio.h
@@ -196,7 +196,7 @@ bool copy_from_iter_full_nocache(void *addr, size_t bytes, struct iov_iter *i)
 #ifdef CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE
 /*
  * Note, users like pmem that depend on the stricter semantics of
- * copy_from_iter_flushcache() than copy_from_iter_nocache() must check for
+ * _copy_from_iter_flushcache() than _copy_from_iter_nocache() must check for
  * IS_ENABLED(CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE) before assuming that the
  * destination is flushed from the cache on return.
  */
@@ -211,24 +211,6 @@ size_t _copy_mc_to_iter(const void *addr, size_t bytes, struct iov_iter *i);
 #define _copy_mc_to_iter _copy_to_iter
 #endif
 
-static __always_inline __must_check
-size_t copy_from_iter_flushcache(void *addr, size_t bytes, struct iov_iter *i)
-{
-	if (unlikely(!check_copy_size(addr, bytes, false)))
-		return 0;
-	else
-		return _copy_from_iter_flushcache(addr, bytes, i);
-}
-
-static __always_inline __must_check
-size_t copy_mc_to_iter(void *addr, size_t bytes, struct iov_iter *i)
-{
-	if (unlikely(!check_copy_size(addr, bytes, true)))
-		return 0;
-	else
-		return _copy_mc_to_iter(addr, bytes, i);
-}
-
 size_t iov_iter_zero(size_t bytes, struct iov_iter *);
 unsigned long iov_iter_alignment(const struct iov_iter *i);
 unsigned long iov_iter_gap_alignment(const struct iov_iter *i);
--
2.30.2
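
[Context: the deleted wrappers only added the HARDENED_USERCOPY object-size check in front of the underscore helpers; the checked helpers in include/linux/uio.h that do have callers keep this shape. A minimal sketch of that pattern, reconstructed from a contemporary uio.h and not part of the patch itself:]

static __always_inline __must_check
size_t copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i)
{
	/* HARDENED_USERCOPY-style object-size check before the raw copy */
	if (unlikely(!check_copy_size(addr, bytes, true)))
		return 0;
	return _copy_to_iter(addr, bytes, i);
}

Callers that deliberately bypass the check, such as pmem, call _copy_from_iter_flushcache() and _copy_mc_to_iter() directly, so removing the unused checked wrappers loses nothing.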
Dan Williams
2021-Dec-12 14:22 UTC
[PATCH 1/5] uio: remove copy_from_iter_flushcache() and copy_mc_to_iter()
On Wed, Dec 8, 2021 at 10:38 PM Christoph Hellwig <hch at lst.de> wrote:
>
> These two wrappers are never used.
>
> Signed-off-by: Christoph Hellwig <hch at lst.de>
> ---
>  drivers/nvdimm/pmem.c |  4 ++--
>  include/linux/uio.h   | 20 +-------------------
>  2 files changed, 3 insertions(+), 21 deletions(-)
>
> diff --git a/drivers/nvdimm/pmem.c b/drivers/nvdimm/pmem.c
> index 4190c8c46ca88..8294f1c701baa 100644
> --- a/drivers/nvdimm/pmem.c
> +++ b/drivers/nvdimm/pmem.c
> @@ -302,8 +302,8 @@ static long pmem_dax_direct_access(struct dax_device *dax_dev,
>  }
>
>  /*
> - * Use the 'no check' versions of copy_from_iter_flushcache() and
> - * copy_mc_to_iter() to bypass HARDENED_USERCOPY overhead. Bounds
> + * Use the 'no check' versions of _copy_from_iter_flushcache() and
> + * _copy_mc_to_iter() to bypass HARDENED_USERCOPY overhead. Bounds
>   * checking, both file offset and device offset, is handled by
>   * dax_iomap_actor()
>   */

This comment change does not make sense since it is saying why pmem is
using the "_" versions. However, I assume this whole comment goes away
in a later patch.

> diff --git a/include/linux/uio.h b/include/linux/uio.h
> index 6350354f97e90..494d552c1d663 100644
> --- a/include/linux/uio.h
> +++ b/include/linux/uio.h
> @@ -196,7 +196,7 @@ bool copy_from_iter_full_nocache(void *addr, size_t bytes, struct iov_iter *i)
>  #ifdef CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE
>  /*
>   * Note, users like pmem that depend on the stricter semantics of
> - * copy_from_iter_flushcache() than copy_from_iter_nocache() must check for
> + * _copy_from_iter_flushcache() than _copy_from_iter_nocache() must check for
>   * IS_ENABLED(CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE) before assuming that the
>   * destination is flushed from the cache on return.
>   */

Same here.

> @@ -211,24 +211,6 @@ size_t _copy_mc_to_iter(const void *addr, size_t bytes, struct iov_iter *i);
>  #define _copy_mc_to_iter _copy_to_iter
>  #endif
>
> -static __always_inline __must_check
> -size_t copy_from_iter_flushcache(void *addr, size_t bytes, struct iov_iter *i)
> -{
> -       if (unlikely(!check_copy_size(addr, bytes, false)))
> -               return 0;
> -       else
> -               return _copy_from_iter_flushcache(addr, bytes, i);
> -}
> -
> -static __always_inline __must_check
> -size_t copy_mc_to_iter(void *addr, size_t bytes, struct iov_iter *i)
> -{
> -       if (unlikely(!check_copy_size(addr, bytes, true)))
> -               return 0;
> -       else
> -               return _copy_mc_to_iter(addr, bytes, i);
> -}
> -
>  size_t iov_iter_zero(size_t bytes, struct iov_iter *);
>  unsigned long iov_iter_alignment(const struct iov_iter *i);
>  unsigned long iov_iter_gap_alignment(const struct iov_iter *i);
> --
> 2.30.2
>
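
[Context for the review comment: the comment being discussed sits directly above pmem's dax copy callbacks, which already call the underscore helpers. A rough sketch of those callbacks as they looked before this series, reconstructed from memory of drivers/nvdimm/pmem.c and therefore an assumption rather than part of the patch:]

static size_t pmem_copy_from_iter(struct dax_device *dax_dev, pgoff_t pgoff,
		void *addr, size_t bytes, struct iov_iter *i)
{
	/* 'no check' variant: bounds checking is done by dax_iomap_actor() */
	return _copy_from_iter_flushcache(addr, bytes, i);
}

static size_t pmem_copy_to_iter(struct dax_device *dax_dev, pgoff_t pgoff,
		void *addr, size_t bytes, struct iov_iter *i)
{
	/* machine-check-safe copy out of pmem, again without the size check */
	return _copy_mc_to_iter(addr, bytes, i);
}

Once the checked copy_from_iter_flushcache()/copy_mc_to_iter() wrappers are gone, a comment that contrasts the "_" names against themselves no longer explains anything, which is the point of the remark above; Dan assumes the comment (and presumably these callbacks) is removed later in the series.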