static int nvme_rdma_map_data()

in host/rdma.c [1524:1623]
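
nvme_rdma_map_data() builds and DMA-maps the scatterlist(s) for a block request and fills in the command's SGL descriptor, choosing between a null SGL (no data), inline data in the command capsule, a single-entry SGL keyed by the global rkey, a fast-register MR, or a signature MR when protection information is offloaded. Roughly, the submission path in nvme_rdma_queue_rq() calls it like the sketch below (abridged; the out_err label is hypothetical and the exact error handling differs between kernel versions):

	ret = nvme_rdma_map_data(queue, rq, c);
	if (unlikely(ret < 0)) {
		dev_err(queue->ctrl->ctrl.device,
			"Failed to map data (%d)\n", ret);
		goto out_err;	/* hypothetical label: unmap the sqe and fail the request */
	}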


static int nvme_rdma_map_data(struct nvme_rdma_queue *queue,
		struct request *rq, struct nvme_command *c)
{
	struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);
	struct nvme_rdma_device *dev = queue->device;
	struct ib_device *ibdev = dev->dev;
	int pi_count = 0;
	int count, ret;

	req->num_sge = 1;
	refcount_set(&req->ref, 2); /* send and recv completions */

	/* PSDT: transfer data via SGLs; metadata, if any, via the metadata pointer. */
	c->common.flags |= NVME_CMD_SGL_METABUF;

	/* No data to transfer: encode a zero-length (null) keyed SGL descriptor. */
	if (!blk_rq_nr_phys_segments(rq))
		return nvme_rdma_set_sg_null(c);

	/* The data scatterlist is laid out immediately after the request PDU. */
	req->data_sgl.sg_table.sgl = (struct scatterlist *)(req + 1);
	ret = sg_alloc_table_chained(&req->data_sgl.sg_table,
			blk_rq_nr_phys_segments(rq), req->data_sgl.sg_table.sgl,
			NVME_INLINE_SG_CNT);
	if (ret)
		return -ENOMEM;

	req->data_sgl.nents = blk_rq_map_sg(rq->q, rq,
					    req->data_sgl.sg_table.sgl);

	count = ib_dma_map_sg(ibdev, req->data_sgl.sg_table.sgl,
			      req->data_sgl.nents, rq_dma_dir(rq));
	if (unlikely(count <= 0)) {
		ret = -EIO;
		goto out_free_table;
	}

	/* Requests with integrity (PI) metadata get a second, separately mapped scatterlist. */
	if (blk_integrity_rq(rq)) {
		req->metadata_sgl->sg_table.sgl =
			(struct scatterlist *)(req->metadata_sgl + 1);
		ret = sg_alloc_table_chained(&req->metadata_sgl->sg_table,
				blk_rq_count_integrity_sg(rq->q, rq->bio),
				req->metadata_sgl->sg_table.sgl,
				NVME_INLINE_METADATA_SG_CNT);
		if (unlikely(ret)) {
			ret = -ENOMEM;
			goto out_unmap_sg;
		}

		req->metadata_sgl->nents = blk_rq_map_integrity_sg(rq->q,
				rq->bio, req->metadata_sgl->sg_table.sgl);
		pi_count = ib_dma_map_sg(ibdev,
					 req->metadata_sgl->sg_table.sgl,
					 req->metadata_sgl->nents,
					 rq_dma_dir(rq));
		if (unlikely(pi_count <= 0)) {
			ret = -EIO;
			goto out_free_pi_table;
		}
	}

	/* PI offload: register data and metadata together under one signature MR. */
	if (req->use_sig_mr) {
		ret = nvme_rdma_map_sg_pi(queue, req, c, count, pi_count);
		goto out;
	}

	/*
	 * Few enough segments: send writes inline in the command capsule
	 * (I/O queues only), or reference a single DMA segment with the
	 * unsafe global rkey.
	 */
	if (count <= dev->num_inline_segments) {
		if (rq_data_dir(rq) == WRITE && nvme_rdma_queue_idx(queue) &&
		    queue->ctrl->use_inline_data &&
		    blk_rq_payload_bytes(rq) <=
				nvme_rdma_inline_data_size(queue)) {
			ret = nvme_rdma_map_sg_inline(queue, req, c, count);
			goto out;
		}

		if (count == 1 && dev->pd->flags & IB_PD_UNSAFE_GLOBAL_RKEY) {
			ret = nvme_rdma_map_sg_single(queue, req, c);
			goto out;
		}
	}

	/* Default: register the data through a fast-register (FR) memory region. */
	ret = nvme_rdma_map_sg_fr(queue, req, c, count);
out:
	if (unlikely(ret))
		goto out_unmap_pi_sg;

	return 0;

/* Error unwind: undo the mappings in reverse order of setup. */
out_unmap_pi_sg:
	if (blk_integrity_rq(rq))
		ib_dma_unmap_sg(ibdev, req->metadata_sgl->sg_table.sgl,
				req->metadata_sgl->nents, rq_dma_dir(rq));
out_free_pi_table:
	if (blk_integrity_rq(rq))
		sg_free_table_chained(&req->metadata_sgl->sg_table,
				      NVME_INLINE_METADATA_SG_CNT);
out_unmap_sg:
	ib_dma_unmap_sg(ibdev, req->data_sgl.sg_table.sgl, req->data_sgl.nents,
			rq_dma_dir(rq));
out_free_table:
	sg_free_table_chained(&req->data_sgl.sg_table, NVME_INLINE_SG_CNT);
	return ret;
}
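
For reference, the small helpers used on the no-data and single-segment paths above, and the inline-size limit, look roughly like the sketches below. They are reconstructed from the same file and hedged accordingly; details may differ between kernel versions.

/* No payload: a zero-length keyed SGL descriptor in the command capsule. */
static int nvme_rdma_set_sg_null(struct nvme_command *c)
{
	struct nvme_keyed_sgl_desc *sg = &c->common.dptr.ksgl;

	sg->addr = 0;
	put_unaligned_le24(0, sg->length);
	put_unaligned_le32(0, sg->key);
	sg->type = NVME_KEY_SGL_FMT_DATA_DESC << 4;
	return 0;
}

/* One DMA segment referenced directly via the PD's unsafe global rkey. */
static int nvme_rdma_map_sg_single(struct nvme_rdma_queue *queue,
		struct nvme_rdma_request *req, struct nvme_command *c)
{
	struct nvme_keyed_sgl_desc *sg = &c->common.dptr.ksgl;

	sg->addr = cpu_to_le64(sg_dma_address(req->data_sgl.sg_table.sgl));
	put_unaligned_le24(sg_dma_len(req->data_sgl.sg_table.sgl), sg->length);
	put_unaligned_le32(queue->device->pd->unsafe_global_rkey, sg->key);
	sg->type = NVME_KEY_SGL_FMT_DATA_DESC << 4;
	return 0;
}

/* Capsule bytes available for inline data after the 64-byte command. */
static inline size_t nvme_rdma_inline_data_size(struct nvme_rdma_queue *queue)
{
	return queue->cmnd_capsule_len - sizeof(struct nvme_command);
}

The inline path (nvme_rdma_map_sg_inline()) instead adds the data segments as extra send SGEs so the payload travels inside the command capsule, bounded by nvme_rdma_inline_data_size().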