static int map_direct_mr()

in mlx5/core/mr.c [223:293]
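Builds the DMA mapping for one direct memory region: a first pass over the iotlb picks a power-of-two scatterlist entry size (the GCD of all map lengths, rounded down), a second pass fills a scatterlist with one entry per chunk, and the result is DMA-mapped and handed to create_direct_mr() to create the hardware memory key. On failure the DMA mapping and the table are unwound.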


static int map_direct_mr(struct mlx5_vdpa_dev *mvdev, struct mlx5_vdpa_direct_mr *mr,
			 struct vhost_iotlb *iotlb)
{
	struct vhost_iotlb_map *map;
	unsigned long lgcd = 0;
	int log_entity_size;
	unsigned long size;
	u64 start = 0;
	int err;
	struct page *pg;
	unsigned int nsg;
	int sglen;
	u64 pa;
	u64 paend;
	struct scatterlist *sg;
	struct device *dma = mvdev->vdev.dma_dev;

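	/*
	 * Pass 1: compute the GCD of the lengths of all iotlb maps that
	 * intersect [mr->start, mr->end). The scatterlist entry size must
	 * evenly divide every map, and the GCD is the largest such size.
	 * start accumulates the bytes covered so far.
	 */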
	for (map = vhost_iotlb_itree_first(iotlb, mr->start, mr->end - 1);
	     map; map = vhost_iotlb_itree_next(map, start, mr->end - 1)) {
		size = maplen(map, mr);
		lgcd = gcd(lgcd, size);
		start += size;
	}
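	/* Round the GCD down to the nearest power of two (ilog2() truncates). */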
	log_entity_size = ilog2(lgcd);

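	/* sglen is the entry size in bytes; nsg entries cover the whole range. */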
	sglen = 1 << log_entity_size;
	nsg = MLX5_DIV_ROUND_UP_POW2(mr->end - mr->start, log_entity_size);

	err = sg_alloc_table(&mr->sg_head, nsg, GFP_KERNEL);
	if (err)
		return err;

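	/*
	 * Pass 2: walk the maps again and emit one scatterlist entry for
	 * each sglen-sized chunk of each map's physical range.
	 */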
	sg = mr->sg_head.sgl;
	for (map = vhost_iotlb_itree_first(iotlb, mr->start, mr->end - 1);
	     map; map = vhost_iotlb_itree_next(map, mr->start, mr->end - 1)) {
		paend = map->addr + maplen(map, mr);
		for (pa = map->addr; pa < paend; pa += sglen) {
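			/* Translate the physical address back to its struct page. */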
			pg = pfn_to_page(__phys_to_pfn(pa));
			if (!sg) {
				mlx5_vdpa_warn(mvdev, "sg null. start 0x%llx, end 0x%llx\n",
					       map->start, map->last + 1);
				err = -ENOMEM;
				goto err_map;
			}
			sg_set_page(sg, pg, sglen, 0);
			sg = sg_next(sg);
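			/* sg_next() returns NULL once the table is exhausted. */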
			if (!sg)
				goto done;
		}
	}
done:
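	/*
	 * Record the layout and DMA-map the table for the device.
	 * dma_map_sg_attrs() returns the number of entries mapped, or 0 on
	 * failure.
	 */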
	mr->log_size = log_entity_size;
	mr->nsg = nsg;
	mr->nent = dma_map_sg_attrs(dma, mr->sg_head.sgl, mr->nsg, DMA_BIDIRECTIONAL, 0);
	if (!mr->nent) {
		err = -ENOMEM;
		goto err_map;
	}

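	/* Create the direct memory key (MR) in hardware over the mapped range. */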
	err = create_direct_mr(mvdev, mr);
	if (err)
		goto err_direct;

	return 0;

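	/* Unwind in reverse order: undo the DMA mapping, then free the table. */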
err_direct:
	dma_unmap_sg_attrs(dma, mr->sg_head.sgl, mr->nsg, DMA_BIDIRECTIONAL, 0);
err_map:
	sg_free_table(&mr->sg_head);
	return err;
}
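
The subtle step is the entity-size computation in pass 1. A minimal userspace sketch of just that step, with hypothetical map lengths and simplified stand-ins for the kernel's gcd() and ilog2() helpers:

#include <stdio.h>

/* Userspace stand-ins for the kernel's gcd() and ilog2() helpers. */
static unsigned long gcd(unsigned long a, unsigned long b)
{
	while (b) {
		unsigned long t = a % b;

		a = b;
		b = t;
	}
	return a;
}

static int ilog2(unsigned long v)
{
	int log = -1;

	while (v) {
		v >>= 1;
		log++;
	}
	return log;
}

int main(void)
{
	/* Hypothetical iotlb map lengths intersecting the MR range. */
	unsigned long sizes[] = { 0x10000, 0x4000, 0xc000 };
	unsigned long lgcd = 0;
	int i;

	for (i = 0; i < 3; i++)
		lgcd = gcd(lgcd, sizes[i]);	/* gcd(0, x) == x */

	/* ilog2() truncates, so the entry size is a power of two <= lgcd. */
	printf("lgcd=0x%lx, sglen=0x%x\n", lgcd, 1 << ilog2(lgcd));
	return 0;
}

With these lengths the GCD is 0x4000, so each scatterlist entry covers 16 KiB and every map splits into a whole number of entries, which is what pass 2 relies on.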