On 2021/1/6 5:05, Eli Cohen wrote:
> map_direct_mr() assumed that the number of scatter/gather entries
> returned by dma_map_sg_attrs() was equal to the number of segments in
> the sgl list. This led to wrong population of the mkey object. Fix this
> by properly referring to the returned value.
>
> In addition, get rid of fill_sg() whjich effect is overwritten bu
> populate_mtts().
Typo: "whjich" -> "which", "bu" -> "by".
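
To restate the contract the fix relies on (a rough sketch of my own, not
the patch; example_map() and the debug print are made up):
dma_map_sg_attrs() may merge contiguous segments and returns the number of
entries it actually mapped (0 on failure), so any walk of the mapped list
has to be bounded by that return value rather than by the nsg that was
passed in:

#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

/* Illustration only -- not part of the patch. */
static int example_map(struct device *dma, struct mlx5_vdpa_direct_mr *mr)
{
	struct scatterlist *sg;
	dma_addr_t addr;
	int i;

	/* may coalesce segments; returns 0 on failure */
	mr->nent = dma_map_sg_attrs(dma, mr->sg_head.sgl, mr->nsg,
				    DMA_BIDIRECTIONAL, 0);
	if (!mr->nent)
		return -ENOMEM;

	/* walk only the entries that were actually mapped */
	for_each_sg(mr->sg_head.sgl, sg, mr->nent, i) {
		addr = sg_dma_address(sg);
		pr_debug("entry %d: addr %pad len %u\n",
			 i, &addr, sg_dma_len(sg));
	}

	return 0;
}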
>
> Fixes: 94abbccdf291 ("vdpa/mlx5: Add shared memory registration code")
> Signed-off-by: Eli Cohen <elic at nvidia.com>
> ---
> drivers/vdpa/mlx5/core/mlx5_vdpa.h | 1 +
> drivers/vdpa/mlx5/core/mr.c | 28 ++++++++++++----------------
> 2 files changed, 13 insertions(+), 16 deletions(-)
>
> diff --git a/drivers/vdpa/mlx5/core/mlx5_vdpa.h b/drivers/vdpa/mlx5/core/mlx5_vdpa.h
> index 5c92a576edae..08f742fd2409 100644
> --- a/drivers/vdpa/mlx5/core/mlx5_vdpa.h
> +++ b/drivers/vdpa/mlx5/core/mlx5_vdpa.h
> @@ -15,6 +15,7 @@ struct mlx5_vdpa_direct_mr {
> struct sg_table sg_head;
> int log_size;
> int nsg;
> + int nent;
> struct list_head list;
> u64 offset;
> };
> diff --git a/drivers/vdpa/mlx5/core/mr.c b/drivers/vdpa/mlx5/core/mr.c
> index 4b6195666c58..d300f799efcd 100644
> --- a/drivers/vdpa/mlx5/core/mr.c
> +++ b/drivers/vdpa/mlx5/core/mr.c
> @@ -25,17 +25,6 @@ static int get_octo_len(u64 len, int page_shift)
> return (npages + 1) / 2;
> }
>
> -static void fill_sg(struct mlx5_vdpa_direct_mr *mr, void *in)
> -{
> - struct scatterlist *sg;
> - __be64 *pas;
> - int i;
> -
> - pas = MLX5_ADDR_OF(create_mkey_in, in, klm_pas_mtt);
> - for_each_sg(mr->sg_head.sgl, sg, mr->nsg, i)
> - (*pas) = cpu_to_be64(sg_dma_address(sg));
> -}
> -
> static void mlx5_set_access_mode(void *mkc, int mode)
> {
> MLX5_SET(mkc, mkc, access_mode_1_0, mode & 0x3);
> @@ -45,10 +34,18 @@ static void mlx5_set_access_mode(void *mkc, int mode)
> static void populate_mtts(struct mlx5_vdpa_direct_mr *mr, __be64 *mtt)
> {
> struct scatterlist *sg;
> + int nsg = mr->nsg;
> + u64 dma_addr;
> + u64 dma_len;
> + int j = 0;
> int i;
>
> - for_each_sg(mr->sg_head.sgl, sg, mr->nsg, i)
> - mtt[i] = cpu_to_be64(sg_dma_address(sg));
> + for_each_sg(mr->sg_head.sgl, sg, mr->nent, i) {
> + for (dma_addr = sg_dma_address(sg), dma_len = sg_dma_len(sg);
> + nsg && dma_len;
> + nsg--, dma_addr += BIT(mr->log_size), dma_len -= BIT(mr->log_size))
> + mtt[j++] = cpu_to_be64(dma_addr);
It looks to me that the mtt entries are also limited by log_size. It would be
better to explain this a little bit in the commit log.
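
For example (my own illustration, not patch code): with log_size = 12, i.e.
each MTT entry covering 4 KiB, a single 16 KiB mapped segment produces four
entries, and the overall count written is still capped by mr->nsg through
the nsg counter above. Roughly:

	/* Rough illustration, not patch code: MTT entries one mapped
	 * segment consumes; each entry covers BIT(mr->log_size) bytes,
	 * and the total written is still capped by mr->nsg.
	 */
	unsigned int per_seg = DIV_ROUND_UP(sg_dma_len(sg), BIT(mr->log_size));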
Thanks
> + }
> }
>
> static int create_direct_mr(struct mlx5_vdpa_dev *mvdev, struct mlx5_vdpa_direct_mr *mr)
> @@ -64,7 +61,6 @@ static int create_direct_mr(struct mlx5_vdpa_dev *mvdev, struct mlx5_vdpa_direct
> return -ENOMEM;
>
> MLX5_SET(create_mkey_in, in, uid, mvdev->res.uid);
> - fill_sg(mr, in);
> mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
> MLX5_SET(mkc, mkc, lw, !!(mr->perm & VHOST_MAP_WO));
> MLX5_SET(mkc, mkc, lr, !!(mr->perm & VHOST_MAP_RO));
> @@ -276,8 +272,8 @@ static int map_direct_mr(struct mlx5_vdpa_dev *mvdev, struct mlx5_vdpa_direct_mr
> done:
> mr->log_size = log_entity_size;
> mr->nsg = nsg;
> - err = dma_map_sg_attrs(dma, mr->sg_head.sgl, mr->nsg, DMA_BIDIRECTIONAL, 0);
> - if (!err)
> + mr->nent = dma_map_sg_attrs(dma, mr->sg_head.sgl, mr->nsg, DMA_BIDIRECTIONAL, 0);
> + if (!mr->nent)
> goto err_map;
>
> err = create_direct_mr(mvdev, mr);