static int noinline_for_stack sun4i_ss_opti_poll()

in allwinner/sun4i-ss/sun4i-ss-cipher.c [15:149]

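Optimized polling path of the sun4i-ss skcipher implementation. The whole request is processed by programmed I/O under ss->slock: the key, IV and mode are written to the engine, then the source data is fed into the RX FIFO and the result drained from the TX FIFO as SS_FCSR reports space. When decrypting with an IV, the last ciphertext block of the source is saved up front, because it is the chaining IV to hand back and an in-place operation would overwrite it.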

static int noinline_for_stack sun4i_ss_opti_poll(struct skcipher_request *areq)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
	struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sun4i_ss_ctx *ss = op->ss;
	unsigned int ivsize = crypto_skcipher_ivsize(tfm);
	struct sun4i_cipher_req_ctx *ctx = skcipher_request_ctx(areq);
	u32 mode = ctx->mode;
	void *backup_iv = NULL;
	/* when activating SS, the default FIFO space is SS_RX_DEFAULT(32) */
	u32 rx_cnt = SS_RX_DEFAULT;
	u32 tx_cnt = 0;
	u32 spaces;
	u32 v;
	int err = 0;
	unsigned int i;
	unsigned int ileft = areq->cryptlen;
	unsigned int oleft = areq->cryptlen;
	unsigned int todo;
	unsigned long pi = 0, po = 0; /* progress for in and out */
	bool miter_err;
	struct sg_mapping_iter mi, mo;
	unsigned int oi, oo; /* offset for in and out */
	unsigned long flags;
	struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
	struct sun4i_ss_alg_template *algt;

	if (!areq->cryptlen)
		return 0;

	if (!areq->src || !areq->dst) {
		dev_err_ratelimited(ss->dev, "ERROR: Some SGs are NULL\n");
		return -EINVAL;
	}

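	/*
	 * For decryption, the next chaining IV is the last ciphertext block of
	 * the input, so save it before an in-place operation overwrites it.
	 */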
	if (areq->iv && ivsize > 0 && mode & SS_DECRYPTION) {
		backup_iv = kzalloc(ivsize, GFP_KERNEL);
		if (!backup_iv)
			return -ENOMEM;
		scatterwalk_map_and_copy(backup_iv, areq->src, areq->cryptlen - ivsize, ivsize, 0);
	}

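	/* Account this request in the optimized-path debug statistics */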
	if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN4I_SS_DEBUG)) {
		algt = container_of(alg, struct sun4i_ss_alg_template, alg.crypto);
		algt->stat_opti++;
		algt->stat_bytes += areq->cryptlen;
	}

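	/* The SS is a single shared engine: serialize all register access */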
	spin_lock_irqsave(&ss->slock, flags);

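	/* Program the key into the SS_KEY registers, one 32-bit word at a time */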
	for (i = 0; i < op->keylen / 4; i++)
		writesl(ss->base + SS_KEY0 + i * 4, &op->key[i], 1);

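	/* Load the IV, up to four 32-bit words, into the SS_IV registers */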
	if (areq->iv) {
		for (i = 0; i < 4 && i < ivsize / 4; i++) {
			v = *(u32 *)(areq->iv + i * 4);
			writesl(ss->base + SS_IV0 + i * 4, &v, 1);
		}
	}
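	/* Writing the mode word to SS_CTL configures and starts the engine */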
	writel(mode, ss->base + SS_CTL);

	ileft = areq->cryptlen / 4;
	oleft = areq->cryptlen / 4;
	oi = 0;
	oo = 0;
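	/*
	 * PIO the payload through the engine: feed up to rx_cnt words into the
	 * RX FIFO, then drain up to tx_cnt words from the TX FIFO. The sg_miter
	 * mappings are atomic, so each pass restarts the iterator and skips the
	 * pi/po bytes already consumed.
	 */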
	do {
		if (ileft) {
			sg_miter_start(&mi, areq->src, sg_nents(areq->src),
					SG_MITER_FROM_SG | SG_MITER_ATOMIC);
			if (pi)
				sg_miter_skip(&mi, pi);
			miter_err = sg_miter_next(&mi);
			if (!miter_err || !mi.addr) {
				dev_err_ratelimited(ss->dev, "ERROR: sg_miter return null\n");
				err = -EINVAL;
				goto release_ss;
			}
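			/* todo counts 32-bit words; oi and mi.length count bytes */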
			todo = min(rx_cnt, ileft);
			todo = min_t(size_t, todo, (mi.length - oi) / 4);
			if (todo) {
				ileft -= todo;
				writesl(ss->base + SS_RXFIFO, mi.addr + oi, todo);
				oi += todo * 4;
			}
			if (oi == mi.length) {
				pi += mi.length;
				oi = 0;
			}
			sg_miter_stop(&mi);
		}

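		/* SS_FCSR reports free RX FIFO slots and words pending in the TX FIFO */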
		spaces = readl(ss->base + SS_FCSR);
		rx_cnt = SS_RXFIFO_SPACES(spaces);
		tx_cnt = SS_TXFIFO_SPACES(spaces);

		sg_miter_start(&mo, areq->dst, sg_nents(areq->dst),
			       SG_MITER_TO_SG | SG_MITER_ATOMIC);
		if (po)
			sg_miter_skip(&mo, po);
		miter_err = sg_miter_next(&mo);
		if (!miter_err || !mo.addr) {
			dev_err_ratelimited(ss->dev, "ERROR: sg_miter return null\n");
			err = -EINVAL;
			goto release_ss;
		}
		todo = min(tx_cnt, oleft);
		todo = min_t(size_t, todo, (mo.length - oo) / 4);
		if (todo) {
			oleft -= todo;
			readsl(ss->base + SS_TXFIFO, mo.addr + oo, todo);
			oo += todo * 4;
		}
		if (oo == mo.length) {
			oo = 0;
			po += mo.length;
		}
		sg_miter_stop(&mo);
	} while (oleft);

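	/*
	 * Return the next chaining IV: the saved ciphertext block for
	 * decryption, the last block written to the destination for encryption.
	 */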
	if (areq->iv) {
		if (mode & SS_DECRYPTION) {
			memcpy(areq->iv, backup_iv, ivsize);
		} else {
			scatterwalk_map_and_copy(areq->iv, areq->dst, areq->cryptlen - ivsize,
						 ivsize, 0);
		}
	}

release_ss:
	writel(0, ss->base + SS_CTL);
	spin_unlock_irqrestore(&ss->slock, flags);
	/* kfree_sensitive(NULL) is a no-op, so this is safe on the error paths */
	kfree_sensitive(backup_iv);
	return err;
}