Liang Chen
2023-May-26 05:46 UTC
[PATCH net-next 5/5] virtio_net: Implement DMA pre-handler
Add a DMA pre-handler that utilizes the page pool for managing DMA mappings.
When an IOMMU is enabled, turning on the page_pool_dma_map module parameter
selects the page pool for DMA mapping management and yields a significant
reduction in DMA mapping overhead.
In a test environment with a single-core VM and a QEMU-emulated IOMMU,
significant performance improvements can be observed:
Upstream codebase: 1.76 Gbits/sec
Upstream codebase with page pool fragmentation support: 1.81 Gbits/sec
Upstream codebase with page pool fragmentation and DMA support: 19.3 Gbits/sec
Signed-off-by: Liang Chen <liangchen.linux at gmail.com>
---
drivers/net/virtio_net.c | 55 ++++++++++++++++++++++++++++++++++++++++
1 file changed, 55 insertions(+)
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index ac40b8c66c59..73cc4f9fe4fa 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -22,6 +22,7 @@
#include <net/route.h>
#include <net/xdp.h>
#include <net/net_failover.h>
+#include <linux/iommu.h>
static int napi_weight = NAPI_POLL_WEIGHT;
module_param(napi_weight, int, 0444);
@@ -33,8 +34,10 @@ module_param(napi_tx, bool, 0644);
static bool page_pool_enabled;
static bool page_pool_frag;
+static bool page_pool_dma_map;
module_param(page_pool_enabled, bool, 0400);
module_param(page_pool_frag, bool, 0400);
+module_param(page_pool_dma_map, bool, 0400);
/* FIXME: MTU in config. */
#define GOOD_PACKET_LEN (ETH_HLEN + VLAN_HLEN + ETH_DATA_LEN)
@@ -3830,6 +3833,49 @@ static void virtnet_del_vqs(struct virtnet_info *vi)
virtnet_free_queues(vi);
}
+static dma_addr_t virtnet_pp_dma_map_page(struct device *dev, struct page *page,
+ unsigned long offset, size_t size,
+ enum dma_data_direction dir, unsigned long attrs)
+{
+ struct page *head_page;
+
+ if (dir != DMA_FROM_DEVICE)
+ return 0;
+
+ head_page = compound_head(page);
+ return page_pool_get_dma_addr(head_page)
+ + (page - head_page) * PAGE_SIZE
+ + offset;
+}
+
+static bool virtnet_pp_dma_unmap_page(struct device *dev, dma_addr_t dma_handle,
+ size_t size, enum dma_data_direction dir,
+ unsigned long attrs)
+{
+ phys_addr_t phys;
+
+ /* Handle only the RX direction, and sync the DMA memory only if it's not
+ * a DMA coherent architecture.
+ */
+ if (dir != DMA_FROM_DEVICE)
+ return false;
+
+ if (dev_is_dma_coherent(dev))
+ return true;
+
+ phys = iommu_iova_to_phys(iommu_get_dma_domain(dev), dma_handle);
+ if (WARN_ON(!phys))
+ return false;
+
+ arch_sync_dma_for_cpu(phys, size, dir);
+ return true;
+}
+
+static struct virtqueue_pre_dma_ops virtnet_pp_pre_dma_ops = {
+ .map_page = virtnet_pp_dma_map_page,
+ .unmap_page = virtnet_pp_dma_unmap_page,
+};
+
static void virtnet_alloc_page_pool(struct receive_queue *rq)
{
struct virtio_device *vdev = rq->vq->vdev;
@@ -3845,6 +3891,15 @@ static void virtnet_alloc_page_pool(struct receive_queue *rq)
if (page_pool_frag)
pp_params.flags |= PP_FLAG_PAGE_FRAG;
+ /* Consider using page pool DMA support only when DMA API is used. */
+ if (virtio_has_feature(vdev, VIRTIO_F_ACCESS_PLATFORM) &&
+ page_pool_dma_map) {
+ pp_params.flags |= PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV;
+ pp_params.dma_dir = DMA_FROM_DEVICE;
+ pp_params.max_len = PAGE_SIZE << pp_params.order;
+ virtqueue_register_pre_dma_ops(rq->vq, &virtnet_pp_pre_dma_ops);
+ }
+
rq->page_pool = page_pool_create(&pp_params);
if (IS_ERR(rq->page_pool)) {
dev_warn(&vdev->dev, "page pool creation failed: %ld\n",
--
2.31.1
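
For readers without the rest of the series at hand: the pre-DMA ops
interface this patch registers against is introduced by an earlier patch
of the series on the virtio_ring side. The sketch below is reconstructed
purely from the usage visible above (the struct name, both members, and
the registration call all appear in this patch); the exact definition
and calling semantics are an assumption, not a quote from that patch.

    /*
     * Reconstructed for illustration -- not the actual definition from
     * the virtio_ring patch.  The expectation, inferred from the hooks
     * above, is that the virtio core calls ->map_page()/->unmap_page()
     * first and falls back to the regular DMA API when a hook declines
     * (returns 0 / false).
     */
    struct virtqueue_pre_dma_ops {
            dma_addr_t (*map_page)(struct device *dev, struct page *page,
                                   unsigned long offset, size_t size,
                                   enum dma_data_direction dir,
                                   unsigned long attrs);
            bool (*unmap_page)(struct device *dev, dma_addr_t dma_handle,
                               size_t size, enum dma_data_direction dir,
                               unsigned long attrs);
    };

    void virtqueue_register_pre_dma_ops(struct virtqueue *vq,
                                        struct virtqueue_pre_dma_ops *ops);

Note that all three knobs are 0400 module parameters, so they can only be
set at module load time, e.g. "modprobe virtio_net page_pool_enabled=1
page_pool_frag=1 page_pool_dma_map=1"; they cannot be toggled through
sysfs afterwards.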
Jason Wang
2023-May-26 07:06 UTC
[PATCH net-next 5/5] virtio_net: Implement DMA pre-handler
On Fri, May 26, 2023 at 1:47 PM Liang Chen <liangchen.linux at gmail.com> wrote:

[...]

> + head_page = compound_head(page);
> + return page_pool_get_dma_addr(head_page)
> + + (page - head_page) * PAGE_SIZE
> + + offset;

So it's not a map, it is just a query of the dma address from the pool.

[...]

> + phys = iommu_iova_to_phys(iommu_get_dma_domain(dev), dma_handle);

This would be somewhat slow. If we track the mapping by driver, it would
be much faster.

More could be seen here:
https://lists.linuxfoundation.org/pipermail/virtualization/2023-May/066778.html

Thanks
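
Two notes on the review above. The "map" hook can degenerate into pure
address arithmetic because, with PP_FLAG_DMA_MAP set, the page pool maps
each page once when it enters the pool and stores the resulting IOVA with
the page; page_pool_get_dma_addr() merely reads that stored value back,
so no per-packet IOMMU work happens on the map side.

As for "track the mapping by driver", the thread does not settle on a
data structure; below is one hypothetical way to do it (the xarray and
the virtnet_pp_remember()/virtnet_pp_lookup() helpers are invented for
this sketch), trading the IOMMU page-table walk for a lookup in
driver-owned state. Compound-page corner cases and entry teardown are
deliberately elided.

    #include <linux/xarray.h>
    #include <linux/mm.h>

    /* Hypothetical bookkeeping: IOVA pfn -> phys pfn, filled at map time. */
    static DEFINE_XARRAY(virtnet_pp_iova_map);

    static void virtnet_pp_remember(dma_addr_t iova, struct page *page)
    {
            xa_store(&virtnet_pp_iova_map, iova >> PAGE_SHIFT,
                     xa_mk_value(page_to_pfn(page)), GFP_ATOMIC);
    }

    static phys_addr_t virtnet_pp_lookup(dma_addr_t iova)
    {
            void *entry = xa_load(&virtnet_pp_iova_map, iova >> PAGE_SHIFT);

            if (!entry)
                    return 0;
            /* A page-granular IOMMU mapping preserves the in-page offset. */
            return PFN_PHYS(xa_to_value(entry)) + offset_in_page(iova);
    }

virtnet_pp_dma_map_page() would call virtnet_pp_remember() before
returning, and virtnet_pp_dma_unmap_page() would use virtnet_pp_lookup()
in place of iommu_iova_to_phys().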
kernel test robot
2023-May-26 17:34 UTC
[PATCH net-next 5/5] virtio_net: Implement DMA pre-handler
Hi Liang,
kernel test robot noticed the following build errors:
[auto build test ERROR on net-next/main]
url:
https://github.com/intel-lab-lkp/linux/commits/Liang-Chen/virtio_net-Add-page_pool-support-to-improve-performance/20230526-135805
base: net-next/main
patch link:
https://lore.kernel.org/r/20230526054621.18371-5-liangchen.linux%40gmail.com
patch subject: [PATCH net-next 5/5] virtio_net: Implement DMA pre-handler
config: m68k-allmodconfig (https://download.01.org/0day-ci/archive/20230527/202305270110.TbNSDh0Z-lkp at intel.com/config)
compiler: m68k-linux-gcc (GCC) 12.1.0
reproduce (this is a W=1 build):
        mkdir -p ~/bin
        wget https://raw.githubusercontent.com/intel/lkp-tests/master/sbin/make.cross -O ~/bin/make.cross
        chmod +x ~/bin/make.cross
        # https://github.com/intel-lab-lkp/linux/commit/e968bb5cacd30b672d0ccf705a24f1a792ff45aa
        git remote add linux-review https://github.com/intel-lab-lkp/linux
        git fetch --no-tags linux-review Liang-Chen/virtio_net-Add-page_pool-support-to-improve-performance/20230526-135805
        git checkout e968bb5cacd30b672d0ccf705a24f1a792ff45aa
        # save the config file
        mkdir build_dir && cp config build_dir/.config
        COMPILER_INSTALL_PATH=$HOME/0day COMPILER=gcc-12.1.0 ~/bin/make.cross W=1 O=build_dir ARCH=m68k olddefconfig
        COMPILER_INSTALL_PATH=$HOME/0day COMPILER=gcc-12.1.0 ~/bin/make.cross W=1 O=build_dir ARCH=m68k SHELL=/bin/bash
If you fix the issue, kindly add following tag where applicable
| Reported-by: kernel test robot <lkp at intel.com>
| Closes: https://lore.kernel.org/oe-kbuild-all/202305270110.TbNSDh0Z-lkp at intel.com/
All errors (new ones prefixed by >>, old ones prefixed by <<):
>> ERROR: modpost: "iommu_get_dma_domain" [drivers/net/virtio_net.ko] undefined!
--
0-DAY CI Kernel Test Service
https://github.com/intel/lkp-tests/wiki
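
The modpost error above is the standard symptom of a module referencing a
symbol that carries no EXPORT_SYMBOL(): allmodconfig builds virtio_net as
a module, and iommu_get_dma_domain() is internal to the IOMMU core rather
than part of the exported driver-facing API. One conventional way around
it (an assumption here, not a fix proposed in this thread) would be to go
through the exported iommu_get_domain_for_dev() instead:

    #include <linux/iommu.h>

    /*
     * Sketch only: iommu_get_domain_for_dev() is exported to modules,
     * unlike iommu_get_dma_domain().  Whether resolving the IOVA this
     * way is desirable at all is exactly what the review comment above
     * questions -- tracking the mapping in the driver avoids the
     * IOVA-to-phys walk entirely.
     */
    static phys_addr_t virtnet_iova_to_phys(struct device *dev, dma_addr_t iova)
    {
            struct iommu_domain *domain = iommu_get_domain_for_dev(dev);

            if (!domain)
                    return 0;
            return iommu_iova_to_phys(domain, iova);
    }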