void jsimd_idct_4x4_neon()

in simd/arm/jidctred-neon.c [196:486]

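Dequantizes an 8x8 block of DCT coefficients and performs a reduced-size inverse DCT, producing a 4x4 block of output samples. The first pass operates on the columns of the coefficient block and the second pass on the rows of the intermediate result; both passes are 4-point IDCTs that never reference coefficient 4. The final samples are saturated to the unsigned 8-bit range and written to the first four rows of output_buf at offset output_col.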

void jsimd_idct_4x4_neon(void *dct_table, JCOEFPTR coef_block,
                         JSAMPARRAY output_buf, JDIMENSION output_col)
{
  ISLOW_MULT_TYPE *quantptr = dct_table;

  /* Load DCT coefficients. */
  int16x8_t row0  = vld1q_s16(coef_block + 0 * DCTSIZE);
  int16x8_t row1  = vld1q_s16(coef_block + 1 * DCTSIZE);
  int16x8_t row2  = vld1q_s16(coef_block + 2 * DCTSIZE);
  int16x8_t row3  = vld1q_s16(coef_block + 3 * DCTSIZE);
  int16x8_t row5  = vld1q_s16(coef_block + 5 * DCTSIZE);
  int16x8_t row6  = vld1q_s16(coef_block + 6 * DCTSIZE);
  int16x8_t row7  = vld1q_s16(coef_block + 7 * DCTSIZE);
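  /* Row 4 is never loaded: the 4-point IDCT below makes no use of DCT
   * coefficient 4 in either dimension.
   */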

  /* Load quantization table values for DC coefficients. */
  int16x8_t quant_row0 = vld1q_s16(quantptr + 0 * DCTSIZE);
  /* Dequantize DC coefficients. */
  row0 = vmulq_s16(row0, quant_row0);

  /* Construct bitmap to test if all AC coefficients are 0. */
  int16x8_t bitmap = vorrq_s16(row1, row2);
  bitmap = vorrq_s16(bitmap, row3);
  bitmap = vorrq_s16(bitmap, row5);
  bitmap = vorrq_s16(bitmap, row6);
  bitmap = vorrq_s16(bitmap, row7);

  int64_t left_ac_bitmap = vgetq_lane_s64(vreinterpretq_s64_s16(bitmap), 0);
  int64_t right_ac_bitmap = vgetq_lane_s64(vreinterpretq_s64_s16(bitmap), 1);
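  /* left_ac_bitmap/right_ac_bitmap are zero only if every AC coefficient in
   * the corresponding four columns is zero, in which case those columns can
   * skip dequantization and the first-pass IDCT entirely and just take the
   * scaled DC value.
   */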

  /* Load constants for IDCT computation. */
#ifdef HAVE_VLD1_S16_X3
  const int16x4x3_t consts = vld1_s16_x3(jsimd_idct_4x4_neon_consts);
#else
  /* GCC does not currently support the intrinsic vld1_<type>_x3(). */
  const int16x4_t consts1 = vld1_s16(jsimd_idct_4x4_neon_consts);
  const int16x4_t consts2 = vld1_s16(jsimd_idct_4x4_neon_consts + 4);
  const int16x4_t consts3 = vld1_s16(jsimd_idct_4x4_neon_consts + 8);
  const int16x4x3_t consts = { { consts1, consts2, consts3 } };
#endif
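  /* consts now holds the twelve 16-bit fixed-point multipliers (the
   * FIX()-scaled cosine constants of the scalar 4x4 IDCT), which are consumed
   * by lane in the vmull_lane_s16()/vmlal_lane_s16() sequences below.
   */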

  if (left_ac_bitmap == 0 && right_ac_bitmap == 0) {
    /* All AC coefficients are zero.
     * Compute DC values and duplicate into row vectors 0, 1, 2, and 3.
     */
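    /* (The IDCT of a DC-only block is constant, so the dequantized DC value
     * only needs to be left-shifted to the PASS1_BITS fixed-point scale that
     * the full first pass would otherwise produce.)
     */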
    int16x8_t dcval = vshlq_n_s16(row0, PASS1_BITS);
    row0 = dcval;
    row1 = dcval;
    row2 = dcval;
    row3 = dcval;
  } else if (left_ac_bitmap == 0) {
    /* AC coefficients are zero for columns 0, 1, 2, and 3.
     * Compute DC values for these columns.
     */
    int16x4_t dcval = vshl_n_s16(vget_low_s16(row0), PASS1_BITS);

    /* Commence regular IDCT computation for columns 4, 5, 6, and 7. */

    /* Load quantization table. */
    int16x4_t quant_row1 = vld1_s16(quantptr + 1 * DCTSIZE + 4);
    int16x4_t quant_row2 = vld1_s16(quantptr + 2 * DCTSIZE + 4);
    int16x4_t quant_row3 = vld1_s16(quantptr + 3 * DCTSIZE + 4);
    int16x4_t quant_row5 = vld1_s16(quantptr + 5 * DCTSIZE + 4);
    int16x4_t quant_row6 = vld1_s16(quantptr + 6 * DCTSIZE + 4);
    int16x4_t quant_row7 = vld1_s16(quantptr + 7 * DCTSIZE + 4);

    /* Even part */
    int32x4_t tmp0 = vshll_n_s16(vget_high_s16(row0), CONST_BITS + 1);

    int16x4_t z2 = vmul_s16(vget_high_s16(row2), quant_row2);
    int16x4_t z3 = vmul_s16(vget_high_s16(row6), quant_row6);

    int32x4_t tmp2 = vmull_lane_s16(z2, consts.val[0], 0);
    tmp2 = vmlal_lane_s16(tmp2, z3, consts.val[0], 1);

    int32x4_t tmp10 = vaddq_s32(tmp0, tmp2);
    int32x4_t tmp12 = vsubq_s32(tmp0, tmp2);

    /* Odd part */
    int16x4_t z1 = vmul_s16(vget_high_s16(row7), quant_row7);
    z2 = vmul_s16(vget_high_s16(row5), quant_row5);
    z3 = vmul_s16(vget_high_s16(row3), quant_row3);
    int16x4_t z4 = vmul_s16(vget_high_s16(row1), quant_row1);

    tmp0 = vmull_lane_s16(z1, consts.val[0], 2);
    tmp0 = vmlal_lane_s16(tmp0, z2, consts.val[0], 3);
    tmp0 = vmlal_lane_s16(tmp0, z3, consts.val[1], 0);
    tmp0 = vmlal_lane_s16(tmp0, z4, consts.val[1], 1);

    tmp2 = vmull_lane_s16(z1, consts.val[1], 2);
    tmp2 = vmlal_lane_s16(tmp2, z2, consts.val[1], 3);
    tmp2 = vmlal_lane_s16(tmp2, z3, consts.val[2], 0);
    tmp2 = vmlal_lane_s16(tmp2, z4, consts.val[2], 1);

    /* Final output stage: descale and narrow to 16-bit. */
    row0 = vcombine_s16(dcval, vrshrn_n_s32(vaddq_s32(tmp10, tmp2),
                                            CONST_BITS - PASS1_BITS + 1));
    row3 = vcombine_s16(dcval, vrshrn_n_s32(vsubq_s32(tmp10, tmp2),
                                            CONST_BITS - PASS1_BITS + 1));
    row1 = vcombine_s16(dcval, vrshrn_n_s32(vaddq_s32(tmp12, tmp0),
                                            CONST_BITS - PASS1_BITS + 1));
    row2 = vcombine_s16(dcval, vrshrn_n_s32(vsubq_s32(tmp12, tmp0),
                                            CONST_BITS - PASS1_BITS + 1));
  } else if (right_ac_bitmap == 0) {
    /* AC coefficients are zero for columns 4, 5, 6, and 7.
     * Compute DC values for these columns.
     */
    int16x4_t dcval = vshl_n_s16(vget_high_s16(row0), PASS1_BITS);

    /* Commence regular IDCT computation for columns 0, 1, 2, and 3. */

    /* Load quantization table. */
    int16x4_t quant_row1 = vld1_s16(quantptr + 1 * DCTSIZE);
    int16x4_t quant_row2 = vld1_s16(quantptr + 2 * DCTSIZE);
    int16x4_t quant_row3 = vld1_s16(quantptr + 3 * DCTSIZE);
    int16x4_t quant_row5 = vld1_s16(quantptr + 5 * DCTSIZE);
    int16x4_t quant_row6 = vld1_s16(quantptr + 6 * DCTSIZE);
    int16x4_t quant_row7 = vld1_s16(quantptr + 7 * DCTSIZE);

    /* Even part */
    int32x4_t tmp0 = vshll_n_s16(vget_low_s16(row0), CONST_BITS + 1);

    int16x4_t z2 = vmul_s16(vget_low_s16(row2), quant_row2);
    int16x4_t z3 = vmul_s16(vget_low_s16(row6), quant_row6);

    int32x4_t tmp2 = vmull_lane_s16(z2, consts.val[0], 0);
    tmp2 = vmlal_lane_s16(tmp2, z3, consts.val[0], 1);

    int32x4_t tmp10 = vaddq_s32(tmp0, tmp2);
    int32x4_t tmp12 = vsubq_s32(tmp0, tmp2);

    /* Odd part */
    int16x4_t z1 = vmul_s16(vget_low_s16(row7), quant_row7);
    z2 = vmul_s16(vget_low_s16(row5), quant_row5);
    z3 = vmul_s16(vget_low_s16(row3), quant_row3);
    int16x4_t z4 = vmul_s16(vget_low_s16(row1), quant_row1);

    tmp0 = vmull_lane_s16(z1, consts.val[0], 2);
    tmp0 = vmlal_lane_s16(tmp0, z2, consts.val[0], 3);
    tmp0 = vmlal_lane_s16(tmp0, z3, consts.val[1], 0);
    tmp0 = vmlal_lane_s16(tmp0, z4, consts.val[1], 1);

    tmp2 = vmull_lane_s16(z1, consts.val[1], 2);
    tmp2 = vmlal_lane_s16(tmp2, z2, consts.val[1], 3);
    tmp2 = vmlal_lane_s16(tmp2, z3, consts.val[2], 0);
    tmp2 = vmlal_lane_s16(tmp2, z4, consts.val[2], 1);

    /* Final output stage: descale and narrow to 16-bit. */
    row0 = vcombine_s16(vrshrn_n_s32(vaddq_s32(tmp10, tmp2),
                                     CONST_BITS - PASS1_BITS + 1), dcval);
    row3 = vcombine_s16(vrshrn_n_s32(vsubq_s32(tmp10, tmp2),
                                     CONST_BITS - PASS1_BITS + 1), dcval);
    row1 = vcombine_s16(vrshrn_n_s32(vaddq_s32(tmp12, tmp0),
                                     CONST_BITS - PASS1_BITS + 1), dcval);
    row2 = vcombine_s16(vrshrn_n_s32(vsubq_s32(tmp12, tmp0),
                                     CONST_BITS - PASS1_BITS + 1), dcval);
  } else {
    /* Each half of the block contains at least one non-zero AC coefficient,
     * so the full IDCT calculation is required for all columns.
     */
    int16x8_t quant_row1 = vld1q_s16(quantptr + 1 * DCTSIZE);
    int16x8_t quant_row2 = vld1q_s16(quantptr + 2 * DCTSIZE);
    int16x8_t quant_row3 = vld1q_s16(quantptr + 3 * DCTSIZE);
    int16x8_t quant_row5 = vld1q_s16(quantptr + 5 * DCTSIZE);
    int16x8_t quant_row6 = vld1q_s16(quantptr + 6 * DCTSIZE);
    int16x8_t quant_row7 = vld1q_s16(quantptr + 7 * DCTSIZE);

    /* Even part */
    int32x4_t tmp0_l = vshll_n_s16(vget_low_s16(row0), CONST_BITS + 1);
    int32x4_t tmp0_h = vshll_n_s16(vget_high_s16(row0), CONST_BITS + 1);

    int16x8_t z2 = vmulq_s16(row2, quant_row2);
    int16x8_t z3 = vmulq_s16(row6, quant_row6);

    int32x4_t tmp2_l = vmull_lane_s16(vget_low_s16(z2), consts.val[0], 0);
    int32x4_t tmp2_h = vmull_lane_s16(vget_high_s16(z2), consts.val[0], 0);
    tmp2_l = vmlal_lane_s16(tmp2_l, vget_low_s16(z3), consts.val[0], 1);
    tmp2_h = vmlal_lane_s16(tmp2_h, vget_high_s16(z3), consts.val[0], 1);

    int32x4_t tmp10_l = vaddq_s32(tmp0_l, tmp2_l);
    int32x4_t tmp10_h = vaddq_s32(tmp0_h, tmp2_h);
    int32x4_t tmp12_l = vsubq_s32(tmp0_l, tmp2_l);
    int32x4_t tmp12_h = vsubq_s32(tmp0_h, tmp2_h);

    /* Odd part */
    int16x8_t z1 = vmulq_s16(row7, quant_row7);
    z2 = vmulq_s16(row5, quant_row5);
    z3 = vmulq_s16(row3, quant_row3);
    int16x8_t z4 = vmulq_s16(row1, quant_row1);

    tmp0_l = vmull_lane_s16(vget_low_s16(z1), consts.val[0], 2);
    tmp0_l = vmlal_lane_s16(tmp0_l, vget_low_s16(z2), consts.val[0], 3);
    tmp0_l = vmlal_lane_s16(tmp0_l, vget_low_s16(z3), consts.val[1], 0);
    tmp0_l = vmlal_lane_s16(tmp0_l, vget_low_s16(z4), consts.val[1], 1);
    tmp0_h = vmull_lane_s16(vget_high_s16(z1), consts.val[0], 2);
    tmp0_h = vmlal_lane_s16(tmp0_h, vget_high_s16(z2), consts.val[0], 3);
    tmp0_h = vmlal_lane_s16(tmp0_h, vget_high_s16(z3), consts.val[1], 0);
    tmp0_h = vmlal_lane_s16(tmp0_h, vget_high_s16(z4), consts.val[1], 1);

    tmp2_l = vmull_lane_s16(vget_low_s16(z1), consts.val[1], 2);
    tmp2_l = vmlal_lane_s16(tmp2_l, vget_low_s16(z2), consts.val[1], 3);
    tmp2_l = vmlal_lane_s16(tmp2_l, vget_low_s16(z3), consts.val[2], 0);
    tmp2_l = vmlal_lane_s16(tmp2_l, vget_low_s16(z4), consts.val[2], 1);
    tmp2_h = vmull_lane_s16(vget_high_s16(z1), consts.val[1], 2);
    tmp2_h = vmlal_lane_s16(tmp2_h, vget_high_s16(z2), consts.val[1], 3);
    tmp2_h = vmlal_lane_s16(tmp2_h, vget_high_s16(z3), consts.val[2], 0);
    tmp2_h = vmlal_lane_s16(tmp2_h, vget_high_s16(z4), consts.val[2], 1);

    /* Final output stage: descale and narrow to 16-bit. */
    row0 = vcombine_s16(vrshrn_n_s32(vaddq_s32(tmp10_l, tmp2_l),
                                     CONST_BITS - PASS1_BITS + 1),
                        vrshrn_n_s32(vaddq_s32(tmp10_h, tmp2_h),
                                     CONST_BITS - PASS1_BITS + 1));
    row3 = vcombine_s16(vrshrn_n_s32(vsubq_s32(tmp10_l, tmp2_l),
                                     CONST_BITS - PASS1_BITS + 1),
                        vrshrn_n_s32(vsubq_s32(tmp10_h, tmp2_h),
                                     CONST_BITS - PASS1_BITS + 1));
    row1 = vcombine_s16(vrshrn_n_s32(vaddq_s32(tmp12_l, tmp0_l),
                                     CONST_BITS - PASS1_BITS + 1),
                        vrshrn_n_s32(vaddq_s32(tmp12_h, tmp0_h),
                                     CONST_BITS - PASS1_BITS + 1));
    row2 = vcombine_s16(vrshrn_n_s32(vsubq_s32(tmp12_l, tmp0_l),
                                     CONST_BITS - PASS1_BITS + 1),
                        vrshrn_n_s32(vsubq_s32(tmp12_h, tmp0_h),
                                     CONST_BITS - PASS1_BITS + 1));
  }

  /* Transpose 8x4 block to perform IDCT on rows in second pass. */
  int16x8x2_t row_01 = vtrnq_s16(row0, row1);
  int16x8x2_t row_23 = vtrnq_s16(row2, row3);

  int32x4x2_t cols_0426 = vtrnq_s32(vreinterpretq_s32_s16(row_01.val[0]),
                                    vreinterpretq_s32_s16(row_23.val[0]));
  int32x4x2_t cols_1537 = vtrnq_s32(vreinterpretq_s32_s16(row_01.val[1]),
                                    vreinterpretq_s32_s16(row_23.val[1]));
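  /* vtrnq_s16() interleaves adjacent 16-bit elements between each pair of
   * rows, and vtrnq_s32() then interleaves adjacent 32-bit pairs; together
   * these transpose the 8x4 block.  cols_0426 holds the even-numbered columns
   * and cols_1537 the odd-numbered columns, as the names indicate.
   */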

  int16x4_t col0 = vreinterpret_s16_s32(vget_low_s32(cols_0426.val[0]));
  int16x4_t col1 = vreinterpret_s16_s32(vget_low_s32(cols_1537.val[0]));
  int16x4_t col2 = vreinterpret_s16_s32(vget_low_s32(cols_0426.val[1]));
  int16x4_t col3 = vreinterpret_s16_s32(vget_low_s32(cols_1537.val[1]));
  int16x4_t col5 = vreinterpret_s16_s32(vget_high_s32(cols_1537.val[0]));
  int16x4_t col6 = vreinterpret_s16_s32(vget_high_s32(cols_0426.val[1]));
  int16x4_t col7 = vreinterpret_s16_s32(vget_high_s32(cols_1537.val[1]));
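  /* Column 4 is deliberately not extracted, since (like row 4 in the first
   * pass) DCT coefficient 4 is not used by the 4-point IDCT.
   */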

  /* Commence second pass of IDCT. */

  /* Even part */
  int32x4_t tmp0 = vshll_n_s16(col0, CONST_BITS + 1);
  int32x4_t tmp2 = vmull_lane_s16(col2, consts.val[0], 0);
  tmp2 = vmlal_lane_s16(tmp2, col6, consts.val[0], 1);

  int32x4_t tmp10 = vaddq_s32(tmp0, tmp2);
  int32x4_t tmp12 = vsubq_s32(tmp0, tmp2);

  /* Odd part */
  tmp0 = vmull_lane_s16(col7, consts.val[0], 2);
  tmp0 = vmlal_lane_s16(tmp0, col5, consts.val[0], 3);
  tmp0 = vmlal_lane_s16(tmp0, col3, consts.val[1], 0);
  tmp0 = vmlal_lane_s16(tmp0, col1, consts.val[1], 1);

  tmp2 = vmull_lane_s16(col7, consts.val[1], 2);
  tmp2 = vmlal_lane_s16(tmp2, col5, consts.val[1], 3);
  tmp2 = vmlal_lane_s16(tmp2, col3, consts.val[2], 0);
  tmp2 = vmlal_lane_s16(tmp2, col1, consts.val[2], 1);

  /* Final output stage: descale and clamp to range [0-255]. */
  int16x8_t output_cols_02 = vcombine_s16(vaddhn_s32(tmp10, tmp2),
                                          vsubhn_s32(tmp12, tmp0));
  int16x8_t output_cols_13 = vcombine_s16(vaddhn_s32(tmp12, tmp0),
                                          vsubhn_s32(tmp10, tmp2));
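  /* vaddhn_s32()/vsubhn_s32() above retain only the high 16 bits of each
   * 32-bit result (an implicit >> 16), so the rounding shift-and-accumulate
   * below need only apply the remainder of the descale and add CENTERJSAMPLE
   * to return the samples to the unsigned range.
   */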
  output_cols_02 = vrsraq_n_s16(vdupq_n_s16(CENTERJSAMPLE), output_cols_02,
                                CONST_BITS + PASS1_BITS + 3 + 1 - 16);
  output_cols_13 = vrsraq_n_s16(vdupq_n_s16(CENTERJSAMPLE), output_cols_13,
                                CONST_BITS + PASS1_BITS + 3 + 1 - 16);
  /* Narrow to 8-bit and convert to unsigned while zipping 8-bit elements.
   * An interleaving store completes the transpose.
   */
  uint8x8x2_t output_0123 = vzip_u8(vqmovun_s16(output_cols_02),
                                    vqmovun_s16(output_cols_13));
  uint16x4x2_t output_01_23 = { {
    vreinterpret_u16_u8(output_0123.val[0]),
    vreinterpret_u16_u8(output_0123.val[1])
  } };

  /* Store 4x4 block to memory. */
  JSAMPROW outptr0 = output_buf[0] + output_col;
  JSAMPROW outptr1 = output_buf[1] + output_col;
  JSAMPROW outptr2 = output_buf[2] + output_col;
  JSAMPROW outptr3 = output_buf[3] + output_col;
  vst2_lane_u16((uint16_t *)outptr0, output_01_23, 0);
  vst2_lane_u16((uint16_t *)outptr1, output_01_23, 1);
  vst2_lane_u16((uint16_t *)outptr2, output_01_23, 2);
  vst2_lane_u16((uint16_t *)outptr3, output_01_23, 3);
}
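
A minimal call sketch follows (statements to be placed inside a test routine), assuming the libjpeg-turbo internal headers (jpeglib.h, jdct.h) are in scope, as they are inside the SIMD extension build. The buffer names and the identity dequantization table are illustrative assumptions, not taken from the library, which normally reaches this entry point through its jsimd dispatch layer.

  /* Hypothetical call sketch -- names and values are illustrative only. */
  JCOEF coef_block[DCTSIZE2] = { 0 };   /* 8x8 block of quantized coefficients */
  ISLOW_MULT_TYPE dct_table[DCTSIZE2];  /* per-coefficient dequantization multipliers */
  JSAMPLE out_rows[4][4];               /* reduced-size 4x4 output block */
  JSAMPROW output_buf[4] = { out_rows[0], out_rows[1], out_rows[2], out_rows[3] };

  for (int i = 0; i < DCTSIZE2; i++)
    dct_table[i] = 1;                   /* identity dequantization, for illustration */
  coef_block[0] = 32;                   /* DC-only block */

  jsimd_idct_4x4_neon(dct_table, coef_block, output_buf, 0);
  /* out_rows[0..3][0..3] now hold the saturated 8-bit output samples. */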