static int sec_alg_skcipher_crypto()

in hisilicon/sec/sec_algs.c [707:860]
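
This is the shared submission path behind the driver's skcipher encrypt and decrypt callbacks, which differ only in the encrypt flag they pass. For context, below is a minimal sketch of how a kernel caller might reach it through the generic skcipher API; the "cbc(aes)" algorithm name, the 16-byte sizes and the helper name are illustrative assumptions rather than anything taken from this file, and the crypto core, not the caller, decides whether this driver services the request.

#include <linux/crypto.h>
#include <linux/err.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <crypto/skcipher.h>

/* Hypothetical helper: encrypt one 16-byte block in place using whatever
 * "cbc(aes)" implementation the crypto core selects (possibly this driver). */
static int example_encrypt_one_block(void)
{
	struct crypto_skcipher *tfm;
	struct skcipher_request *req;
	DECLARE_CRYPTO_WAIT(wait);
	struct scatterlist sg;
	u8 key[16] = { 0 };
	u8 *buf, *iv;
	int ret;

	tfm = crypto_alloc_skcipher("cbc(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	ret = crypto_skcipher_setkey(tfm, key, sizeof(key));
	if (ret)
		goto out_free_tfm;

	req = skcipher_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		ret = -ENOMEM;
		goto out_free_tfm;
	}

	/* Data and IV must be DMA-able for a hardware driver - no stack buffers */
	buf = kzalloc(16, GFP_KERNEL);
	iv = kzalloc(16, GFP_KERNEL);
	if (!buf || !iv) {
		ret = -ENOMEM;
		goto out_free_bufs;
	}

	sg_init_one(&sg, buf, 16);
	skcipher_request_set_callback(req,
				      CRYPTO_TFM_REQ_MAY_SLEEP |
				      CRYPTO_TFM_REQ_MAY_BACKLOG,
				      crypto_req_done, &wait);
	skcipher_request_set_crypt(req, &sg, &sg, 16, iv);

	/* -EINPROGRESS / -EBUSY from the driver are resolved by the wait */
	ret = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);

out_free_bufs:
	kfree(iv);
	kfree(buf);
	skcipher_request_free(req);
out_free_tfm:
	crypto_free_skcipher(tfm);
	return ret;
}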


static int sec_alg_skcipher_crypto(struct skcipher_request *skreq,
				   bool encrypt)
{
	struct crypto_skcipher *atfm = crypto_skcipher_reqtfm(skreq);
	struct crypto_tfm *tfm = crypto_skcipher_tfm(atfm);
	struct sec_alg_tfm_ctx *ctx = crypto_tfm_ctx(tfm);
	struct sec_queue *queue = ctx->queue;
	struct sec_request *sec_req = skcipher_request_ctx(skreq);
	struct sec_dev_info *info = queue->dev_info;
	int i, ret, steps;
	size_t *split_sizes;
	struct scatterlist **splits_in;
	struct scatterlist **splits_out = NULL;
	int *splits_in_nents;
	int *splits_out_nents = NULL;
	struct sec_request_el *el, *temp;
	bool split = skreq->src != skreq->dst;
	gfp_t gfp = skreq->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
			GFP_KERNEL : GFP_ATOMIC;

	mutex_init(&sec_req->lock);
	sec_req->req_base = &skreq->base;
	sec_req->err = 0;
	/* SGL mapping out here to allow us to break it up as necessary */
	sec_req->len_in = sg_nents(skreq->src);

	ret = sec_alg_alloc_and_calc_split_sizes(skreq->cryptlen, &split_sizes,
						 &steps, gfp);
	if (ret)
		return ret;
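	/* Each split of the request becomes one BD (list element) below */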
	sec_req->num_elements = steps;
	ret = sec_map_and_split_sg(skreq->src, split_sizes, steps, &splits_in,
				   &splits_in_nents, sec_req->len_in,
				   info->dev, gfp);
	if (ret)
		goto err_free_split_sizes;

	if (split) {
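		/*
		 * Out-of-place operation (src != dst): map and split the
		 * destination SGL with the same per-BD sizes as the source.
		 */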
		sec_req->len_out = sg_nents(skreq->dst);
		ret = sec_map_and_split_sg(skreq->dst, split_sizes, steps,
					   &splits_out, &splits_out_nents,
					   sec_req->len_out, info->dev, gfp);
		if (ret)
			goto err_unmap_in_sg;
	}
	/* Shared info stored in sec_req - applies to all BDs */
	sec_req->tfm_ctx = ctx;
	sec_req->cb = sec_skcipher_alg_callback;
	INIT_LIST_HEAD(&sec_req->elements);

	/*
	 * Future optimization: in the chaining case we can't use a dma
	 * pool bounce buffer for the IV, but where we know there is no
	 * chaining we could.
	 */
	if (crypto_skcipher_ivsize(atfm)) {
		sec_req->dma_iv = dma_map_single(info->dev, skreq->iv,
						 crypto_skcipher_ivsize(atfm),
						 DMA_TO_DEVICE);
		if (dma_mapping_error(info->dev, sec_req->dma_iv)) {
			ret = -ENOMEM;
			goto err_unmap_out_sg;
		}
	}

	/* Set them all up then queue - cleaner error handling. */
	for (i = 0; i < steps; i++) {
		el = sec_alg_alloc_and_fill_el(&ctx->req_template,
					       encrypt ? 1 : 0,
					       split_sizes[i],
					       skreq->src != skreq->dst,
					       splits_in[i], splits_in_nents[i],
					       split ? splits_out[i] : NULL,
					       split ? splits_out_nents[i] : 0,
					       info, gfp);
		if (IS_ERR(el)) {
			ret = PTR_ERR(el);
			goto err_free_elements;
		}
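		/* All BDs in this request share the one IV DMA mapping */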
		el->req.cipher_iv_addr_lo = lower_32_bits(sec_req->dma_iv);
		el->req.cipher_iv_addr_hi = upper_32_bits(sec_req->dma_iv);
		el->sec_req = sec_req;
		list_add_tail(&el->head, &sec_req->elements);
	}

	/*
	 * Only attempt to queue if the whole lot can fit in the queue -
	 * we can't successfully clean up after a partial queueing so this
	 * must succeed or fail atomically.
	 *
	 * Big hammer test of both software and hardware queues - could be
	 * more refined but this is unlikely to happen so no need.
	 */

	/* Grab a big lock for a long time to avoid concurrency issues */
	mutex_lock(&queue->queuelock);

	/*
	 * Can go on to queue if we have space in either:
	 * 1) The hardware queue and no software queue
	 * 2) The software queue
	 * AND there is nothing in the backlog.  If there is a backlog we
	 * must only add to the backlog list and return -EBUSY.
	 */
	if ((!sec_queue_can_enqueue(queue, steps) &&
	     (!queue->havesoftqueue ||
	      kfifo_avail(&queue->softqueue) < steps)) ||
	    !list_empty(&ctx->backlog)) {
		ret = -EBUSY;
		if (skreq->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG) {
			list_add_tail(&sec_req->backlog_head, &ctx->backlog);
			mutex_unlock(&queue->queuelock);
			goto out;
		}

		mutex_unlock(&queue->queuelock);
		goto err_free_elements;
	}
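	/* Space has been checked, so submit all of this request's BDs */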
	ret = sec_send_request(sec_req, queue);
	mutex_unlock(&queue->queuelock);
	if (ret)
		goto err_free_elements;

	ret = -EINPROGRESS;
out:
	/* Cleanup - all elements in pointer arrays have been copied */
	kfree(splits_in_nents);
	kfree(splits_in);
	kfree(splits_out_nents);
	kfree(splits_out);
	kfree(split_sizes);
	return ret;

err_free_elements:
	list_for_each_entry_safe(el, temp, &sec_req->elements, head) {
		list_del(&el->head);
		sec_alg_free_el(el, info);
	}
	if (crypto_skcipher_ivsize(atfm))
		dma_unmap_single(info->dev, sec_req->dma_iv,
				 crypto_skcipher_ivsize(atfm),
				 DMA_TO_DEVICE);
err_unmap_out_sg:
	if (split)
		sec_unmap_sg_on_err(skreq->dst, steps, splits_out,
				    splits_out_nents, sec_req->len_out,
				    info->dev);
err_unmap_in_sg:
	sec_unmap_sg_on_err(skreq->src, steps, splits_in, splits_in_nents,
			    sec_req->len_in, info->dev);
err_free_split_sizes:
	kfree(split_sizes);

	return ret;
}