int aws_cryptosdk_encrypt_body()

in source/cipher.c [646:744]
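
Encrypts one frame (or the single non-framed body) of a message with AES-GCM. The IV is derived deterministically from the frame sequence number, the frame AAD is authenticated before any plaintext is processed, and on any failure the output buffer is securely zeroed before an error is returned.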


int aws_cryptosdk_encrypt_body(
    const struct aws_cryptosdk_alg_properties *props,
    struct aws_byte_buf *outp,
    const struct aws_byte_cursor *inp,
    const struct aws_byte_buf *message_id,
    uint32_t seqno,
    uint8_t *iv,
    const struct content_key *key,
    uint8_t *tag,
    int body_frame_type) {
    AWS_PRECONDITION(aws_cryptosdk_alg_properties_is_valid(props));
    AWS_PRECONDITION(
        aws_byte_buf_is_valid(outp) ||
        /* This happens when outp comes from a frame whose input plaintext_size was 0. */
        (outp->len == 0 && outp->capacity == 0 && outp->buffer));
    AWS_PRECONDITION(aws_byte_cursor_is_valid(inp));
    AWS_PRECONDITION(aws_byte_buf_is_valid(message_id));
    AWS_PRECONDITION(iv != NULL);
    AWS_PRECONDITION(tag != NULL);
    AWS_PRECONDITION(AWS_MEM_IS_WRITABLE(tag, props->tag_len));
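    /*
     * GCM ciphertext (excluding the tag) is exactly as long as the plaintext,
     * so the caller must size the output buffer's capacity to match the input
     * length exactly.
     */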
    if (inp->len != outp->capacity) {
        return aws_raise_error(AWS_ERROR_SHORT_BUFFER);
    }

    /*
     * We use a deterministic IV generation algorithm; the frame sequence number
     * is used for the IV. To avoid collisions with the header IV, seqno=0 is
     * forbidden.
     */
    if (seqno == 0) {
        return aws_raise_error(AWS_CRYPTOSDK_ERR_CRYPTO_UNKNOWN);
    }

    /* Widen the 32-bit sequence number to 64 bits and convert it to big-endian. */
    uint64_t iv_seq = aws_hton64(seqno);

    /*
     * Paranoid check to make sure we're not going to walk off the end of the IV
     * buffer if someone in the future introduces an algorithm with a really small
     * IV for some reason.
     */
    if (props->iv_len < sizeof(iv_seq)) {
        return aws_raise_error(AWS_CRYPTOSDK_ERR_CRYPTO_UNKNOWN);
    }

    aws_secure_zero(iv, props->iv_len);

    uint8_t *iv_seq_p = iv + props->iv_len - sizeof(iv_seq);
    memcpy(iv_seq_p, &iv_seq, sizeof(iv_seq));
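    /*
     * Example: with the 12-byte GCM IVs used by the current algorithm suites,
     * seqno = 1 yields iv = 00 00 00 00 | 00 00 00 00 00 00 00 01, i.e. zero
     * padding followed by the sequence number as a 64-bit big-endian integer.
     */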

    EVP_CIPHER_CTX *ctx = NULL;

    /* Error code to raise if we bail out early; overwritten on success. */
    int result = AWS_CRYPTOSDK_ERR_CRYPTO_UNKNOWN;

    if (!(ctx = evp_gcm_cipher_init(props, key, iv, true))) goto out;
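    /*
     * The frame AAD binds the message ID, body/frame type, sequence number,
     * and plaintext length into the authentication tag, so tampering with any
     * of them causes decryption to fail.
     */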
    if (!update_frame_aad(ctx, message_id, body_frame_type, seqno, inp->len)) goto out;

    struct aws_byte_buf outbuf    = *outp;
    struct aws_byte_cursor incurs = *inp;

    while (incurs.len) {
        if (incurs.len != outbuf.capacity - outbuf.len) {
            /*
             * Each GCM update writes exactly as many ciphertext bytes as the
             * plaintext bytes it consumes, so the remaining input should always
             * equal the remaining output space. None of the algorithms we
             * currently support should break this invariant. Bail out
             * immediately with an unknown error.
             */
            goto out;
        }

        /* EVP_EncryptUpdate takes an int length, so process at most INT_MAX bytes per call. */
        int in_len = incurs.len > INT_MAX ? INT_MAX : incurs.len;
        int ct_len;

        if (!EVP_EncryptUpdate(ctx, outbuf.buffer + outbuf.len, &ct_len, incurs.ptr, in_len)) goto out;
        /*
         * The next two advances should never fail ... but check the return values
         * just in case.
         */
        if (!aws_byte_cursor_advance_nospec(&incurs, in_len).ptr) goto out;

        if (aws_add_size_checked(outbuf.len, ct_len, &outbuf.len)) goto out;

        if (outbuf.capacity < outbuf.len) {
            /* Somehow we ran over the output buffer. abort() to limit the damage. */
            abort();
        }
    }

    /* Finalize encryption and write the props->tag_len-byte GCM tag. */
    result = evp_gcm_encrypt_final(props, ctx, tag);

out:
    if (ctx) EVP_CIPHER_CTX_free(ctx);

    if (result == AWS_ERROR_SUCCESS) {
        *outp = outbuf;
        return AWS_OP_SUCCESS;
    } else {
        aws_byte_buf_secure_zero(outp);
        return aws_raise_error(result);
    }
}
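
For context, here is a minimal sketch of how a caller inside the SDK might drive this function. The helper and its name are hypothetical (the real callers live in the session framing code); it assumes the private declarations for aws_cryptosdk_encrypt_body and the FRAME_TYPE_FRAME constant from the SDK's private headers, and that the content key and message ID have already been derived elsewhere. It exists mainly to illustrate the buffer-sizing contract (outp->capacity must equal inp->len) and the out-parameters for the IV and tag.

#include <aws/common/byte_buf.h>
#include <aws/cryptosdk/private/cipher.h>
#include <aws/cryptosdk/private/framefmt.h>

/* Hypothetical helper, for illustration only. */
static int encrypt_one_frame(
    struct aws_allocator *alloc,
    const struct aws_cryptosdk_alg_properties *props,
    const struct content_key *key,          /* derived elsewhere from the data key */
    const struct aws_byte_buf *message_id,
    struct aws_byte_cursor plaintext,
    uint32_t seqno,                         /* must be nonzero, per the check above */
    struct aws_byte_buf *ciphertext_out,
    uint8_t *iv,                            /* receives props->iv_len bytes */
    uint8_t *tag) {                         /* receives props->tag_len bytes */
    /* The output capacity must equal the plaintext length exactly. */
    if (aws_byte_buf_init(ciphertext_out, alloc, plaintext.len)) {
        return AWS_OP_ERR;
    }

    if (aws_cryptosdk_encrypt_body(
            props, ciphertext_out, &plaintext, message_id, seqno, iv, key, tag, FRAME_TYPE_FRAME)) {
        aws_byte_buf_clean_up_secure(ciphertext_out);
        return AWS_OP_ERR;
    }

    return AWS_OP_SUCCESS;
}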