static int qat_rsa_dec()

in qat/qat_common/qat_asym_algs.c [770:920]


static int qat_rsa_dec(struct akcipher_request *req)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct qat_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct qat_crypto_instance *inst = ctx->inst;
	struct device *dev = &GET_DEV(inst->accel_dev);
	struct qat_asym_request *qat_req =
			PTR_ALIGN(akcipher_request_ctx(req), 64);
	struct icp_qat_fw_pke_request *msg = &qat_req->req;
	int ret, ctr = 0;

	if (unlikely(!ctx->n || !ctx->d))
		return -EINVAL;

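	/*
	 * The result is key_sz bytes long; if the caller's buffer is too
	 * small, report the required size and return -EOVERFLOW.
	 */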
	if (req->dst_len < ctx->key_sz) {
		req->dst_len = ctx->key_sz;
		return -EOVERFLOW;
	}
	memset(msg, '\0', sizeof(*msg));
	ICP_QAT_FW_PKE_HDR_VALID_FLAG_SET(msg->pke_hdr,
					  ICP_QAT_FW_COMN_REQ_FLAG_SET);
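	/*
	 * Select the firmware PKE function ID for this key size, using the
	 * CRT variant when CRT key material is available.
	 */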
	msg->pke_hdr.cd_pars.func_id = ctx->crt_mode ?
		qat_rsa_dec_fn_id_crt(ctx->key_sz) :
		qat_rsa_dec_fn_id(ctx->key_sz);
	if (unlikely(!msg->pke_hdr.cd_pars.func_id))
		return -EINVAL;

	qat_req->cb = qat_rsa_cb;
	qat_req->ctx.rsa = ctx;
	qat_req->areq.rsa = req;
	msg->pke_hdr.service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_PKE;
	msg->pke_hdr.comn_req_flags =
		ICP_QAT_FW_COMN_FLAGS_BUILD(QAT_COMN_PTR_TYPE_FLAT,
					    QAT_COMN_CD_FLD_TYPE_64BIT_ADR);

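	/*
	 * CRT mode passes the five CRT components (p, q, dp, dq, qinv);
	 * otherwise the private exponent d and modulus n are used directly.
	 */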
	if (ctx->crt_mode) {
		qat_req->in.rsa.dec_crt.p = ctx->dma_p;
		qat_req->in.rsa.dec_crt.q = ctx->dma_q;
		qat_req->in.rsa.dec_crt.dp = ctx->dma_dp;
		qat_req->in.rsa.dec_crt.dq = ctx->dma_dq;
		qat_req->in.rsa.dec_crt.qinv = ctx->dma_qinv;
	} else {
		qat_req->in.rsa.dec.d = ctx->dma_d;
		qat_req->in.rsa.dec.n = ctx->dma_n;
	}
	ret = -ENOMEM;

	/*
	 * src can be of any size in the valid range, but HW expects it to be
	 * the same size as the modulus n, so if the sizes differ we need to
	 * allocate a new buffer and copy the src data into it.
	 * Otherwise we just map the user-provided buffer directly.
	 * The buffer also has to be physically contiguous.
	 */
	if (sg_is_last(req->src) && req->src_len == ctx->key_sz) {
		qat_req->src_align = NULL;
		qat_req->in.rsa.dec.c = dma_map_single(dev, sg_virt(req->src),
						   req->src_len, DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(dev, qat_req->in.rsa.dec.c)))
			return ret;

	} else {
		int shift = ctx->key_sz - req->src_len;

		qat_req->src_align = dma_alloc_coherent(dev, ctx->key_sz,
							&qat_req->in.rsa.dec.c,
							GFP_KERNEL);
		if (unlikely(!qat_req->src_align))
			return ret;

		scatterwalk_map_and_copy(qat_req->src_align + shift, req->src,
					 0, req->src_len, 0);
	}
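	/*
	 * Destination gets the same treatment: map it directly only when it
	 * is a single entry of exactly key_sz bytes, otherwise use an
	 * intermediate coherent buffer.
	 */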
	if (sg_is_last(req->dst) && req->dst_len == ctx->key_sz) {
		qat_req->dst_align = NULL;
		qat_req->out.rsa.dec.m = dma_map_single(dev, sg_virt(req->dst),
						    req->dst_len,
						    DMA_FROM_DEVICE);

		if (unlikely(dma_mapping_error(dev, qat_req->out.rsa.dec.m)))
			goto unmap_src;

	} else {
		qat_req->dst_align = dma_alloc_coherent(dev, ctx->key_sz,
							&qat_req->out.rsa.dec.m,
							GFP_KERNEL);
		if (unlikely(!qat_req->dst_align))
			goto unmap_src;

	}

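	/*
	 * Zero the table entry following the last used parameter, then map
	 * the input/output parameter tables themselves so the firmware can
	 * fetch the flat lists of buffer addresses.
	 */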
	if (ctx->crt_mode)
		qat_req->in.rsa.in_tab[6] = 0;
	else
		qat_req->in.rsa.in_tab[3] = 0;
	qat_req->out.rsa.out_tab[1] = 0;
	qat_req->phy_in = dma_map_single(dev, &qat_req->in.rsa.dec.c,
					 sizeof(struct qat_rsa_input_params),
					 DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(dev, qat_req->phy_in)))
		goto unmap_dst;

	qat_req->phy_out = dma_map_single(dev, &qat_req->out.rsa.dec.m,
					  sizeof(struct qat_rsa_output_params),
					  DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(dev, qat_req->phy_out)))
		goto unmap_in_params;

	msg->pke_mid.src_data_addr = qat_req->phy_in;
	msg->pke_mid.dest_data_addr = qat_req->phy_out;
	msg->pke_mid.opaque = (u64)(__force long)qat_req;
	if (ctx->crt_mode)
		msg->input_param_count = 6;
	else
		msg->input_param_count = 3;

	msg->output_param_count = 1;
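	/* Retry a bounded number of times if the transmit ring is full. */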
	do {
		ret = adf_send_message(ctx->inst->pke_tx, (u32 *)msg);
	} while (ret == -EBUSY && ctr++ < 100);

	if (!ret)
		return -EINPROGRESS;

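	/*
	 * Error path: unwind DMA mappings and temporary buffers in reverse
	 * order of setup.
	 */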
	if (!dma_mapping_error(dev, qat_req->phy_out))
		dma_unmap_single(dev, qat_req->phy_out,
				 sizeof(struct qat_rsa_output_params),
				 DMA_TO_DEVICE);
unmap_in_params:
	if (!dma_mapping_error(dev, qat_req->phy_in))
		dma_unmap_single(dev, qat_req->phy_in,
				 sizeof(struct qat_rsa_input_params),
				 DMA_TO_DEVICE);
unmap_dst:
	if (qat_req->dst_align)
		dma_free_coherent(dev, ctx->key_sz, qat_req->dst_align,
				  qat_req->out.rsa.dec.m);
	else if (!dma_mapping_error(dev, qat_req->out.rsa.dec.m))
		dma_unmap_single(dev, qat_req->out.rsa.dec.m,
				 ctx->key_sz, DMA_FROM_DEVICE);
unmap_src:
	if (qat_req->src_align)
		dma_free_coherent(dev, ctx->key_sz, qat_req->src_align,
				  qat_req->in.rsa.dec.c);
	else if (!dma_mapping_error(dev, qat_req->in.rsa.dec.c))
		dma_unmap_single(dev, qat_req->in.rsa.dec.c,
				 ctx->key_sz, DMA_TO_DEVICE);
	return ret;
}