in sa2ul.c [1080:1295]
static int sa_run(struct sa_req *req)
{
struct sa_rx_data *rxd;
gfp_t gfp_flags;
u32 cmdl[SA_MAX_CMDL_WORDS];
struct sa_crypto_data *pdata = dev_get_drvdata(sa_k3_dev);
struct device *ddev;
struct dma_chan *dma_rx;
int sg_nents, src_nents, dst_nents;
struct scatterlist *src, *dst;
size_t pl, ml, split_size;
struct sa_ctx_info *sa_ctx = req->enc ? &req->ctx->enc : &req->ctx->dec;
int ret;
struct dma_async_tx_descriptor *tx_out;
u32 *mdptr;
bool diff_dst;
enum dma_data_direction dir_src;
struct sa_mapped_sg *mapped_sg;
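	/*
	 * Honour the crypto API flags from the caller: sleeping
	 * allocations are allowed only if CRYPTO_TFM_REQ_MAY_SLEEP is set.
	 */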
gfp_flags = req->base->flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
GFP_KERNEL : GFP_ATOMIC;
rxd = kzalloc(sizeof(*rxd), gfp_flags);
if (!rxd)
return -ENOMEM;
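	/*
	 * In-place requests (src == dst) share one scatterlist, so it has
	 * to be mapped bidirectionally; out-of-place requests map source
	 * and destination separately.
	 */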
if (req->src != req->dst) {
diff_dst = true;
dir_src = DMA_TO_DEVICE;
} else {
diff_dst = false;
dir_src = DMA_BIDIRECTIONAL;
}
	/*
	 * SA2UL has an interesting feature where the receive DMA channel
	 * is selected based on the size of the data passed to the engine:
	 * small requests come back on the first RX channel, large ones on
	 * the second. Within the transition range there is a window where
	 * it is impossible to determine which channel the data will end
	 * up on, and such sizes must be avoided. This is handled by the
	 * SW fallback mechanism in the individual algorithm
	 * implementations.
	 */
if (req->size >= 256)
dma_rx = pdata->dma_rx2;
else
dma_rx = pdata->dma_rx1;
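	/*
	 * All scatterlists are mapped against the device backing the TX
	 * DMA channel; keep it in rxd so they can be unmapped later.
	 */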
ddev = dmaengine_get_dma_device(pdata->dma_tx);
rxd->ddev = ddev;
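	/*
	 * Start from the pre-built command label for this direction
	 * (enc/dec) and patch in the per-request fields.
	 */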
memcpy(cmdl, sa_ctx->cmdl, sa_ctx->cmdl_size);
sa_update_cmdl(req, cmdl, &sa_ctx->cmdl_upd_info);
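	/*
	 * For anything other than a plain hash, tag the request type with
	 * an encrypt/decrypt subtype.
	 */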
if (req->type != CRYPTO_ALG_TYPE_AHASH) {
if (req->enc)
req->type |=
(SA_REQ_SUBTYPE_ENC << SA_REQ_SUBTYPE_SHIFT);
else
req->type |=
(SA_REQ_SUBTYPE_DEC << SA_REQ_SUBTYPE_SHIFT);
}
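	/* The request type is stored in the word following the command label. */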
cmdl[sa_ctx->cmdl_size / sizeof(u32)] = req->type;
	/*
	 * Map the packets. First check whether the data fits into a
	 * single sg entry and use that directly if it does. Otherwise,
	 * map the whole scatterlist and use sg_split() to trim it to the
	 * actual data size being processed by the crypto engine.
	 */
src = req->src;
sg_nents = sg_nents_for_len(src, req->size);
split_size = req->size;
mapped_sg = &rxd->mapped_sg[0];
if (sg_nents == 1 && split_size <= req->src->length) {
src = &mapped_sg->static_sg;
src_nents = 1;
sg_init_table(src, 1);
sg_set_page(src, sg_page(req->src), split_size,
req->src->offset);
mapped_sg->sgt.sgl = src;
mapped_sg->sgt.orig_nents = src_nents;
ret = dma_map_sgtable(ddev, &mapped_sg->sgt, dir_src, 0);
if (ret) {
kfree(rxd);
return ret;
}
mapped_sg->dir = dir_src;
mapped_sg->mapped = true;
} else {
mapped_sg->sgt.sgl = req->src;
mapped_sg->sgt.orig_nents = sg_nents;
ret = dma_map_sgtable(ddev, &mapped_sg->sgt, dir_src, 0);
if (ret) {
kfree(rxd);
return ret;
}
mapped_sg->dir = dir_src;
mapped_sg->mapped = true;
ret = sg_split(mapped_sg->sgt.sgl, mapped_sg->sgt.nents, 0, 1,
&split_size, &src, &src_nents, gfp_flags);
if (ret) {
src_nents = mapped_sg->sgt.nents;
src = mapped_sg->sgt.sgl;
} else {
mapped_sg->split_sg = src;
}
}
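	/* Make sure the source data is visible to the device before DMA starts. */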
dma_sync_sgtable_for_device(ddev, &mapped_sg->sgt, DMA_TO_DEVICE);
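	/*
	 * For in-place operation reuse the source mapping for the
	 * destination; otherwise map (and, if needed, split) the
	 * destination scatterlist the same way as the source above.
	 */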
if (!diff_dst) {
dst_nents = src_nents;
dst = src;
} else {
dst_nents = sg_nents_for_len(req->dst, req->size);
mapped_sg = &rxd->mapped_sg[1];
if (dst_nents == 1 && split_size <= req->dst->length) {
dst = &mapped_sg->static_sg;
dst_nents = 1;
sg_init_table(dst, 1);
sg_set_page(dst, sg_page(req->dst), split_size,
req->dst->offset);
mapped_sg->sgt.sgl = dst;
mapped_sg->sgt.orig_nents = dst_nents;
ret = dma_map_sgtable(ddev, &mapped_sg->sgt,
DMA_FROM_DEVICE, 0);
if (ret)
goto err_cleanup;
mapped_sg->dir = DMA_FROM_DEVICE;
mapped_sg->mapped = true;
} else {
mapped_sg->sgt.sgl = req->dst;
mapped_sg->sgt.orig_nents = dst_nents;
ret = dma_map_sgtable(ddev, &mapped_sg->sgt,
DMA_FROM_DEVICE, 0);
if (ret)
goto err_cleanup;
mapped_sg->dir = DMA_FROM_DEVICE;
mapped_sg->mapped = true;
ret = sg_split(mapped_sg->sgt.sgl, mapped_sg->sgt.nents,
0, 1, &split_size, &dst, &dst_nents,
gfp_flags);
if (ret) {
dst_nents = mapped_sg->sgt.nents;
dst = mapped_sg->sgt.sgl;
} else {
mapped_sg->split_sg = dst;
}
}
}
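	/*
	 * Prepare the receive (DEV_TO_MEM) descriptor on the selected RX
	 * channel so the engine output lands in the destination list.
	 */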
rxd->tx_in = dmaengine_prep_slave_sg(dma_rx, dst, dst_nents,
DMA_DEV_TO_MEM,
DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
if (!rxd->tx_in) {
dev_err(pdata->dev, "IN prep_slave_sg() failed\n");
ret = -EINVAL;
goto err_cleanup;
}
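	/* Save everything the DMA completion callback needs to finish the request. */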
rxd->req = (void *)req->base;
rxd->enc = req->enc;
rxd->iv_idx = req->ctx->iv_idx;
rxd->enc_iv_size = sa_ctx->cmdl_upd_info.enc_iv.size;
rxd->tx_in->callback = req->callback;
rxd->tx_in->callback_param = rxd;
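	/*
	 * Prepare the transmit (MEM_TO_DEV) descriptor that feeds the
	 * source data into the engine.
	 */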
tx_out = dmaengine_prep_slave_sg(pdata->dma_tx, src,
src_nents, DMA_MEM_TO_DEV,
DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
if (!tx_out) {
dev_err(pdata->dev, "OUT prep_slave_sg() failed\n");
ret = -EINVAL;
goto err_cleanup;
}
	/*
	 * Prepare the metadata for the DMA engine. This essentially
	 * describes the crypto algorithm to be used, the data sizes,
	 * the keys, etc.
	 */
mdptr = (u32 *)dmaengine_desc_get_metadata_ptr(tx_out, &pl, &ml);
sa_prepare_tx_desc(mdptr, (sa_ctx->cmdl_size + (SA_PSDATA_CTX_WORDS *
sizeof(u32))), cmdl, sizeof(sa_ctx->epib),
sa_ctx->epib);
ml = sa_ctx->cmdl_size + (SA_PSDATA_CTX_WORDS * sizeof(u32));
dmaengine_desc_set_metadata_len(tx_out, req->mdata_size);
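	/*
	 * Submit both descriptors and kick the channels; completion is
	 * reported asynchronously via the RX callback, hence the
	 * -EINPROGRESS return below.
	 */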
dmaengine_submit(tx_out);
dmaengine_submit(rxd->tx_in);
dma_async_issue_pending(dma_rx);
dma_async_issue_pending(pdata->dma_tx);
return -EINPROGRESS;
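/* Error path: release the rx context and any mappings created above. */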
err_cleanup:
sa_free_sa_rx_data(rxd);
return ret;
}
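/*
 * Example (not part of sa2ul.c): a minimal sketch of how a caller could
 * drive sa_run(), modelled loosely on the driver's skcipher path and using
 * only the struct sa_req fields that appear in sa_run() above. The
 * example_* names and the mdata_size value are hypothetical placeholders;
 * the real callers also fill in IV and offset fields not visible in this
 * excerpt.
 */
static void example_dma_in_callback(void *data)
{
	/* The real callback unmaps the buffers and completes the request. */
}

static int example_cipher_run(struct skcipher_request *req, bool enc)
{
	struct sa_tfm_ctx *ctx =
		crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));
	struct sa_req sa_req = { 0 };

	sa_req.base = &req->base;		/* async request, used for flags */
	sa_req.ctx = ctx;			/* provides the enc/dec sa_ctx_info */
	sa_req.type = CRYPTO_ALG_TYPE_SKCIPHER;
	sa_req.enc = enc;
	sa_req.size = req->cryptlen;		/* drives RX channel selection */
	sa_req.src = req->src;
	sa_req.dst = req->dst;
	sa_req.callback = example_dma_in_callback;
	sa_req.mdata_size = 44;			/* placeholder metadata size */

	return sa_run(&sa_req);			/* -EINPROGRESS on success */
}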