static int handle_aead_req()

in bcm/cipher.c [1259:1523]


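/*
 * handle_aead_req() - Build a single SPU request message for an AEAD crypto
 * API request and submit it over the mailbox. The entire request is handled
 * in one chunk. Returns -EINPROGRESS once the message has been queued, or a
 * negative error code on failure.
 */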
static int handle_aead_req(struct iproc_reqctx_s *rctx)
{
	struct spu_hw *spu = &iproc_priv.spu;
	struct crypto_async_request *areq = rctx->parent;
	struct aead_request *req = container_of(areq,
						struct aead_request, base);
	struct iproc_ctx_s *ctx = rctx->ctx;
	int err;
	unsigned int chunksize;
	unsigned int resp_len;
	u32 spu_hdr_len;
	u32 db_size;
	u32 stat_pad_len;
	u32 pad_len;
	struct brcm_message *mssg;	/* mailbox message */
	struct spu_request_opts req_opts;
	struct spu_cipher_parms cipher_parms;
	struct spu_hash_parms hash_parms;
	struct spu_aead_parms aead_parms;
	int assoc_nents = 0;
	bool incl_icv = false;
	unsigned int digestsize = ctx->digestsize;

	/*
	 * Number of entries in src and dst sg. Always includes SPU msg
	 * header.
	 */
	u8 rx_frag_num = 2;	/* and STATUS */
	u8 tx_frag_num = 1;

	/* doing the whole thing at once */
	chunksize = rctx->total_todo;

	flow_log("%s: chunksize %u\n", __func__, chunksize);

	memset(&req_opts, 0, sizeof(req_opts));
	memset(&hash_parms, 0, sizeof(hash_parms));
	memset(&aead_parms, 0, sizeof(aead_parms));

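	/* Request options: a decrypt request is inbound from the SPU's view */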
	req_opts.is_inbound = !(rctx->is_encrypt);
	req_opts.auth_first = ctx->auth_first;
	req_opts.is_aead = true;
	req_opts.is_esp = ctx->is_esp;

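	/* Cipher parameters from the session context plus the per-request IV */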
	cipher_parms.alg = ctx->cipher.alg;
	cipher_parms.mode = ctx->cipher.mode;
	cipher_parms.type = ctx->cipher_type;
	cipher_parms.key_buf = ctx->enckey;
	cipher_parms.key_len = ctx->enckeylen;
	cipher_parms.iv_buf = rctx->msg_buf.iv_ctr;
	cipher_parms.iv_len = rctx->iv_ctr_len;

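	/* Authentication (hash) parameters; type is refined below for AES auth */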
	hash_parms.alg = ctx->auth.alg;
	hash_parms.mode = ctx->auth.mode;
	hash_parms.type = HASH_TYPE_NONE;
	hash_parms.key_buf = (u8 *)ctx->authkey;
	hash_parms.key_len = ctx->authkeylen;
	hash_parms.digestsize = digestsize;

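	/*
	 * For SHA-224, report at least a digest-sized key length to the SPU,
	 * presumably a hardware requirement.
	 */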
	if ((ctx->auth.alg == HASH_ALG_SHA224) &&
	    (ctx->authkeylen < SHA224_DIGEST_SIZE))
		hash_parms.key_len = SHA224_DIGEST_SIZE;

	aead_parms.assoc_size = req->assoclen;
	if (ctx->is_esp && !ctx->is_rfc4543) {
		/*
		 * The 8-byte IV is included in the assoc data of the request.
		 * SPU2 expects the AAD to include just the SPI and seqno, so
		 * subtract off the IV length.
		 */
		aead_parms.assoc_size -= GCM_RFC4106_IV_SIZE;

		if (rctx->is_encrypt) {
			aead_parms.return_iv = true;
			aead_parms.ret_iv_len = GCM_RFC4106_IV_SIZE;
			aead_parms.ret_iv_off = GCM_ESP_SALT_SIZE;
		}
	} else {
		aead_parms.ret_iv_len = 0;
	}

	/*
	 * Count number of sg entries from the crypto API request that are to
	 * be included in this mailbox message. For dst sg, don't count space
	 * for digest. Digest gets caught in a separate buffer and copied back
	 * to dst sg when processing response.
	 */
	rctx->src_nents = spu_sg_count(rctx->src_sg, rctx->src_skip, chunksize);
	rctx->dst_nents = spu_sg_count(rctx->dst_sg, rctx->dst_skip, chunksize);
	if (aead_parms.assoc_size)
		assoc_nents = spu_sg_count(rctx->assoc, 0,
					   aead_parms.assoc_size);

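	/* Use the mailbox message embedded in the request context */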
	mssg = &rctx->mb_mssg;

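	/*
	 * The whole request is sent as one chunk. If the SPU response will
	 * carry assoc data (and possibly a returned IV), reserve an extra rx
	 * fragment to catch it.
	 */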
	rctx->total_sent = chunksize;
	rctx->src_sent = chunksize;
	if (spu->spu_assoc_resp_len(ctx->cipher.mode,
				    aead_parms.assoc_size,
				    aead_parms.ret_iv_len,
				    rctx->is_encrypt))
		rx_frag_num++;

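	/* Length of IV to carry in the SPU message depends on the cipher mode */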
	aead_parms.iv_len = spu->spu_aead_ivlen(ctx->cipher.mode,
						rctx->iv_ctr_len);

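	/* For AES-based auth (GCM/CCM), hash type mirrors the AES cipher type */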
	if (ctx->auth.alg == HASH_ALG_AES)
		hash_parms.type = (enum hash_type)ctx->cipher_type;

	/* General case AAD padding (CCM and RFC4543 special cases below) */
	aead_parms.aad_pad_len = spu->spu_gcm_ccm_pad_len(ctx->cipher.mode,
						 aead_parms.assoc_size);

	/* General case data padding (CCM decrypt special case below) */
	aead_parms.data_pad_len = spu->spu_gcm_ccm_pad_len(ctx->cipher.mode,
							   chunksize);

	if (ctx->cipher.mode == CIPHER_MODE_CCM) {
		/*
		 * for CCM, AAD len + 2 (rather than AAD len) needs to be
		 * 128-bit aligned
		 */
		aead_parms.aad_pad_len = spu->spu_gcm_ccm_pad_len(
					 ctx->cipher.mode,
					 aead_parms.assoc_size + 2);

		/*
		 * When decrypting CCM, the pad length must be computed
		 * without the ICV, which is tacked on to the end of the
		 * chunk.
		 */
		if (!rctx->is_encrypt)
			aead_parms.data_pad_len =
				spu->spu_gcm_ccm_pad_len(ctx->cipher.mode,
							chunksize - digestsize);

		/* CCM also requires software to rewrite portions of IV: */
		spu->spu_ccm_update_iv(digestsize, &cipher_parms, req->assoclen,
				       chunksize, rctx->is_encrypt,
				       ctx->is_esp);
	}

	if (ctx->is_rfc4543) {
		/*
		 * RFC4543: data is included in AAD, so don't pad after AAD
		 * and pad data based on both AAD + data size
		 */
		aead_parms.aad_pad_len = 0;
		if (!rctx->is_encrypt)
			aead_parms.data_pad_len = spu->spu_gcm_ccm_pad_len(
					ctx->cipher.mode,
					aead_parms.assoc_size + chunksize -
					digestsize);
		else
			aead_parms.data_pad_len = spu->spu_gcm_ccm_pad_len(
					ctx->cipher.mode,
					aead_parms.assoc_size + chunksize);

		req_opts.is_rfc4543 = true;
	}

	if (spu_req_incl_icv(ctx->cipher.mode, rctx->is_encrypt)) {
		incl_icv = true;
		tx_frag_num++;
		/* Copy ICV from end of src scatterlist to digest buf */
		sg_copy_part_to_buf(req->src, rctx->msg_buf.digest, digestsize,
				    req->assoclen + rctx->total_sent -
				    digestsize);
	}

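	/* Update outbound byte count statistics */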
	atomic64_add(chunksize, &iproc_priv.bytes_out);

	flow_log("%s()-sent chunksize:%u\n", __func__, chunksize);

	/* Prepend SPU header with type 3 BCM header */
	memcpy(rctx->msg_buf.bcm_spu_req_hdr, BCMHEADER, BCM_HDR_LEN);

	spu_hdr_len = spu->spu_create_request(rctx->msg_buf.bcm_spu_req_hdr +
					      BCM_HDR_LEN, &req_opts,
					      &cipher_parms, &hash_parms,
					      &aead_parms, chunksize);

	/* Determine total length of padding. Put all padding in one buffer. */
	db_size = spu_real_db_size(aead_parms.assoc_size, aead_parms.iv_len, 0,
				   chunksize, aead_parms.aad_pad_len,
				   aead_parms.data_pad_len, 0);

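	/*
	 * Status padding, if any, shows up as an extra rx fragment. All tx-side
	 * padding (data pad plus status pad) is written into spu_req_pad.
	 */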
	stat_pad_len = spu->spu_wordalign_padlen(db_size);

	if (stat_pad_len)
		rx_frag_num++;
	pad_len = aead_parms.data_pad_len + stat_pad_len;
	if (pad_len) {
		tx_frag_num++;
		spu->spu_request_pad(rctx->msg_buf.spu_req_pad,
				     aead_parms.data_pad_len, 0,
				     ctx->auth.alg, ctx->auth.mode,
				     rctx->total_sent, stat_pad_len);
	}

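	/* Debug dumps of the SPU header, AAD, IV, data, and padding */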
	spu->spu_dump_msg_hdr(rctx->msg_buf.bcm_spu_req_hdr + BCM_HDR_LEN,
			      spu_hdr_len);
	dump_sg(rctx->assoc, 0, aead_parms.assoc_size);
	packet_dump("    aead iv: ", rctx->msg_buf.iv_ctr, aead_parms.iv_len);
	packet_log("BD:\n");
	dump_sg(rctx->src_sg, rctx->src_skip, chunksize);
	packet_dump("   pad: ", rctx->msg_buf.spu_req_pad, pad_len);

	/*
	 * Build mailbox message containing SPU request msg and rx buffers
	 * to catch response message
	 */
	memset(mssg, 0, sizeof(*mssg));
	mssg->type = BRCM_MESSAGE_SPU;
	mssg->ctx = rctx;	/* Will be returned in response */

	/* Create rx scatterlist to catch result */
	rx_frag_num += rctx->dst_nents;
	resp_len = chunksize;

	/*
	 * Always catch ICV in separate buffer. Have to for GCM/CCM because of
	 * padding. Have to for SHA-224 and other truncated SHAs because SPU
	 * sends entire digest back.
	 */
	rx_frag_num++;

	if (((ctx->cipher.mode == CIPHER_MODE_GCM) ||
	     (ctx->cipher.mode == CIPHER_MODE_CCM)) && !rctx->is_encrypt) {
		/*
		 * Input is ciphertext plus ICV, but the ICV is not
		 * included in the output.
		 */
		resp_len -= ctx->digestsize;
		if (resp_len == 0)
			/* no rx frags to catch output data */
			rx_frag_num -= rctx->dst_nents;
	}

	err = spu_aead_rx_sg_create(mssg, req, rctx, rx_frag_num,
				    aead_parms.assoc_size,
				    aead_parms.ret_iv_len, resp_len, digestsize,
				    stat_pad_len);
	if (err)
		return err;

	/* Create tx scatterlist containing SPU request message */
	tx_frag_num += rctx->src_nents;
	tx_frag_num += assoc_nents;
	if (aead_parms.aad_pad_len)
		tx_frag_num++;
	if (aead_parms.iv_len)
		tx_frag_num++;
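	/* Some SPU versions append a status field to the tx message */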
	if (spu->spu_tx_status_len())
		tx_frag_num++;
	err = spu_aead_tx_sg_create(mssg, rctx, tx_frag_num, spu_hdr_len,
				    rctx->assoc, aead_parms.assoc_size,
				    assoc_nents, aead_parms.iv_len, chunksize,
				    aead_parms.aad_pad_len, pad_len, incl_icv);
	if (err)
		return err;

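	/*
	 * Queue the message to the mailbox channel. The SPU completes the
	 * request asynchronously, so report it as in progress.
	 */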
	err = mailbox_send_message(mssg, req->base.flags, rctx->chan_idx);
	if (unlikely(err < 0))
		return err;

	return -EINPROGRESS;
}
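
The GCM/CCM padding referenced in the comments above rounds a length up to a
128-bit (16-byte) boundary; for CCM the length being aligned is the AAD length
plus 2. A minimal sketch of that computation, assuming a 16-byte alignment
constant (the real spu_gcm_ccm_pad_len callback is provided by the
SPU-version-specific code and its details may differ):

/* Hypothetical helper: bytes of padding needed to reach a 16-byte boundary */
static u32 gcm_ccm_pad_len_sketch(unsigned int data_size)
{
	const u32 align = 16;			/* 128-bit alignment */
	const u32 mask = align - 1;

	/* e.g. data_size 20 -> padded to 32 -> returns 12; 16 -> returns 0 */
	return ((data_size + mask) & ~mask) - data_size;
}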