OP_32_64() case labels, via glue()

in tcg/s390x/tcg-target.c.inc [2017:2610]

The OP_32_64(x) labels used throughout the switch below come from a helper
macro that pastes the opcode name into both the 32-bit and 64-bit case
labels:

        case glue(glue(INDEX_op_,x),_i32): \
        case glue(glue(INDEX_op_,x),_i64)
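
For concreteness, a sketch of what one such pair expands to after token
pasting (shown for illustration only; glue() is QEMU's token-concatenation
helper):

        /* OP_32_64(ld8u): becomes */
        case INDEX_op_ld8u_i32:
        case INDEX_op_ld8u_i64: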

static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
                              const TCGArg args[TCG_MAX_OP_ARGS],
                              const int const_args[TCG_MAX_OP_ARGS])
{
    S390Opcode op, op2;
    TCGArg a0, a1, a2;

    switch (opc) {
    case INDEX_op_exit_tb:
        /* Reuse the zeroing that exists for goto_ptr.  */
        a0 = args[0];
        if (a0 == 0) {
            tgen_gotoi(s, S390_CC_ALWAYS, tcg_code_gen_epilogue);
        } else {
            tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R2, a0);
            tgen_gotoi(s, S390_CC_ALWAYS, tb_ret_addr);
        }
        break;

    case INDEX_op_goto_tb:
        a0 = args[0];
        if (s->tb_jmp_insn_offset) {
            /*
             * branch displacement must be aligned for atomic patching;
             * see if we need to add extra nop before branch
             */
            if (!QEMU_PTR_IS_ALIGNED(s->code_ptr + 1, 4)) {
                tcg_out16(s, NOP);
            }
            tcg_debug_assert(!USE_REG_TB);
            tcg_out16(s, RIL_BRCL | (S390_CC_ALWAYS << 4));
            s->tb_jmp_insn_offset[a0] = tcg_current_code_size(s);
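            /*
             * Leave the 4-byte relative displacement of BRCL unwritten for
             * now; it is filled in later by direct-jump patching when this
             * TB is linked to (or unlinked from) its successor.
             */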
            s->code_ptr += 2;
        } else {
            /* load address stored at s->tb_jmp_target_addr + a0 */
            tcg_out_ld_abs(s, TCG_TYPE_PTR, TCG_REG_TB,
                           tcg_splitwx_to_rx(s->tb_jmp_target_addr + a0));
            /* and go there */
            tcg_out_insn(s, RR, BCR, S390_CC_ALWAYS, TCG_REG_TB);
        }
        set_jmp_reset_offset(s, a0);

        /* For the unlinked path of goto_tb, we need to reset
           TCG_REG_TB to the beginning of this TB.  */
        if (USE_REG_TB) {
            int ofs = -tcg_current_code_size(s);
            /* All TBs are restricted to 64KiB by unwind info. */
            tcg_debug_assert(ofs == sextract64(ofs, 0, 20));
            tcg_out_insn(s, RXY, LAY, TCG_REG_TB,
                         TCG_REG_TB, TCG_REG_NONE, ofs);
        }
        break;

    case INDEX_op_goto_ptr:
        a0 = args[0];
        if (USE_REG_TB) {
            tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_TB, a0);
        }
        tcg_out_insn(s, RR, BCR, S390_CC_ALWAYS, a0);
        break;

    OP_32_64(ld8u):
        /* ??? LLC (RXY format) is only present with the extended-immediate
           facility, whereas LLGC is always present.  */
        tcg_out_mem(s, 0, RXY_LLGC, args[0], args[1], TCG_REG_NONE, args[2]);
        break;

    OP_32_64(ld8s):
        /* ??? LB is no smaller than LGB, so no point to using it.  */
        tcg_out_mem(s, 0, RXY_LGB, args[0], args[1], TCG_REG_NONE, args[2]);
        break;

    OP_32_64(ld16u):
        /* ??? LLH (RXY format) is only present with the extended-immediate
           facility, whereas LLGH is always present.  */
        tcg_out_mem(s, 0, RXY_LLGH, args[0], args[1], TCG_REG_NONE, args[2]);
        break;

    case INDEX_op_ld16s_i32:
        tcg_out_mem(s, RX_LH, RXY_LHY, args[0], args[1], TCG_REG_NONE, args[2]);
        break;

    case INDEX_op_ld_i32:
        tcg_out_ld(s, TCG_TYPE_I32, args[0], args[1], args[2]);
        break;

    OP_32_64(st8):
        tcg_out_mem(s, RX_STC, RXY_STCY, args[0], args[1],
                    TCG_REG_NONE, args[2]);
        break;

    OP_32_64(st16):
        tcg_out_mem(s, RX_STH, RXY_STHY, args[0], args[1],
                    TCG_REG_NONE, args[2]);
        break;

    case INDEX_op_st_i32:
        tcg_out_st(s, TCG_TYPE_I32, args[0], args[1], args[2]);
        break;

    case INDEX_op_add_i32:
        a0 = args[0], a1 = args[1], a2 = (int32_t)args[2];
        if (const_args[2]) {
        do_addi_32:
            if (a0 == a1) {
                if (a2 == (int16_t)a2) {
                    tcg_out_insn(s, RI, AHI, a0, a2);
                    break;
                }
                if (HAVE_FACILITY(EXT_IMM)) {
                    tcg_out_insn(s, RIL, AFI, a0, a2);
                    break;
                }
            }
            tcg_out_mem(s, RX_LA, RXY_LAY, a0, a1, TCG_REG_NONE, a2);
        } else if (a0 == a1) {
            tcg_out_insn(s, RR, AR, a0, a2);
        } else {
            tcg_out_insn(s, RX, LA, a0, a1, a2, 0);
        }
        break;
    case INDEX_op_sub_i32:
        a0 = args[0], a1 = args[1], a2 = (int32_t)args[2];
        if (const_args[2]) {
            a2 = -a2;
            goto do_addi_32;
        } else if (a0 == a1) {
            tcg_out_insn(s, RR, SR, a0, a2);
        } else {
            tcg_out_insn(s, RRF, SRK, a0, a1, a2);
        }
        break;

    case INDEX_op_and_i32:
        a0 = args[0], a1 = args[1], a2 = (uint32_t)args[2];
        if (const_args[2]) {
            tcg_out_mov(s, TCG_TYPE_I32, a0, a1);
            tgen_andi(s, TCG_TYPE_I32, a0, a2);
        } else if (a0 == a1) {
            tcg_out_insn(s, RR, NR, a0, a2);
        } else {
            tcg_out_insn(s, RRF, NRK, a0, a1, a2);
        }
        break;
    case INDEX_op_or_i32:
        a0 = args[0], a1 = args[1], a2 = (uint32_t)args[2];
        if (const_args[2]) {
            tcg_out_mov(s, TCG_TYPE_I32, a0, a1);
            tgen_ori(s, TCG_TYPE_I32, a0, a2);
        } else if (a0 == a1) {
            tcg_out_insn(s, RR, OR, a0, a2);
        } else {
            tcg_out_insn(s, RRF, ORK, a0, a1, a2);
        }
        break;
    case INDEX_op_xor_i32:
        a0 = args[0], a1 = args[1], a2 = (uint32_t)args[2];
        if (const_args[2]) {
            tcg_out_mov(s, TCG_TYPE_I32, a0, a1);
            tgen_xori(s, TCG_TYPE_I32, a0, a2);
        } else if (a0 == a1) {
            tcg_out_insn(s, RR, XR, a0, a2);
        } else {
            tcg_out_insn(s, RRF, XRK, a0, a1, a2);
        }
        break;

    case INDEX_op_neg_i32:
        tcg_out_insn(s, RR, LCR, args[0], args[1]);
        break;

    case INDEX_op_mul_i32:
        if (const_args[2]) {
            if ((int32_t)args[2] == (int16_t)args[2]) {
                tcg_out_insn(s, RI, MHI, args[0], args[2]);
            } else {
                tcg_out_insn(s, RIL, MSFI, args[0], args[2]);
            }
        } else {
            tcg_out_insn(s, RRE, MSR, args[0], args[2]);
        }
        break;

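    /*
     * The 32-bit divides operate on the even/odd register pair R2:R3: the
     * 64-bit dividend lives in R2:R3, and after the divide the remainder
     * is in R2 and the quotient in R3, which is why TCG_REG_R2 is
     * hard-coded here.
     */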
    case INDEX_op_div2_i32:
        tcg_out_insn(s, RR, DR, TCG_REG_R2, args[4]);
        break;
    case INDEX_op_divu2_i32:
        tcg_out_insn(s, RRE, DLR, TCG_REG_R2, args[4]);
        break;

    case INDEX_op_shl_i32:
        op = RS_SLL;
        op2 = RSY_SLLK;
    do_shift32:
        a0 = args[0], a1 = args[1], a2 = (int32_t)args[2];
        if (a0 == a1) {
            if (const_args[2]) {
                tcg_out_sh32(s, op, a0, TCG_REG_NONE, a2);
            } else {
                tcg_out_sh32(s, op, a0, a2, 0);
            }
        } else {
            /* Using tcg_out_sh64 here for the format; it is a 32-bit shift.  */
            if (const_args[2]) {
                tcg_out_sh64(s, op2, a0, a1, TCG_REG_NONE, a2);
            } else {
                tcg_out_sh64(s, op2, a0, a1, a2, 0);
            }
        }
        break;
    case INDEX_op_shr_i32:
        op = RS_SRL;
        op2 = RSY_SRLK;
        goto do_shift32;
    case INDEX_op_sar_i32:
        op = RS_SRA;
        op2 = RSY_SRAK;
        goto do_shift32;

    case INDEX_op_rotl_i32:
        /* ??? Using tcg_out_sh64 here for the format; it is a 32-bit rol.  */
        if (const_args[2]) {
            tcg_out_sh64(s, RSY_RLL, args[0], args[1], TCG_REG_NONE, args[2]);
        } else {
            tcg_out_sh64(s, RSY_RLL, args[0], args[1], args[2], 0);
        }
        break;
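    /*
     * There is no rotate-right instruction; rotate right by N is done as
     * rotate left by (32 - N), and for a register count the value is
     * negated, since a 32-bit rotate only depends on the count modulo 32.
     */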
    case INDEX_op_rotr_i32:
        if (const_args[2]) {
            tcg_out_sh64(s, RSY_RLL, args[0], args[1],
                         TCG_REG_NONE, (32 - args[2]) & 31);
        } else {
            tcg_out_insn(s, RR, LCR, TCG_TMP0, args[2]);
            tcg_out_sh64(s, RSY_RLL, args[0], args[1], TCG_TMP0, 0);
        }
        break;

    case INDEX_op_ext8s_i32:
        tgen_ext8s(s, TCG_TYPE_I32, args[0], args[1]);
        break;
    case INDEX_op_ext16s_i32:
        tgen_ext16s(s, TCG_TYPE_I32, args[0], args[1]);
        break;
    case INDEX_op_ext8u_i32:
        tgen_ext8u(s, TCG_TYPE_I32, args[0], args[1]);
        break;
    case INDEX_op_ext16u_i32:
        tgen_ext16u(s, TCG_TYPE_I32, args[0], args[1]);
        break;

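    /*
     * There is no 16-bit byte-reverse instruction.  LRVR/LRVGR reverse the
     * whole register, leaving the swapped halfword at the top, and the
     * shift right by 16 (or 48) brings it back down: arithmetic when the
     * output must be sign-extended (TCG_BSWAP_OS), logical otherwise.
     */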
    case INDEX_op_bswap16_i32:
        a0 = args[0], a1 = args[1], a2 = args[2];
        tcg_out_insn(s, RRE, LRVR, a0, a1);
        if (a2 & TCG_BSWAP_OS) {
            tcg_out_sh32(s, RS_SRA, a0, TCG_REG_NONE, 16);
        } else {
            tcg_out_sh32(s, RS_SRL, a0, TCG_REG_NONE, 16);
        }
        break;
    case INDEX_op_bswap16_i64:
        a0 = args[0], a1 = args[1], a2 = args[2];
        tcg_out_insn(s, RRE, LRVGR, a0, a1);
        if (a2 & TCG_BSWAP_OS) {
            tcg_out_sh64(s, RSY_SRAG, a0, a0, TCG_REG_NONE, 48);
        } else {
            tcg_out_sh64(s, RSY_SRLG, a0, a0, TCG_REG_NONE, 48);
        }
        break;

    case INDEX_op_bswap32_i32:
        tcg_out_insn(s, RRE, LRVR, args[0], args[1]);
        break;
    case INDEX_op_bswap32_i64:
        a0 = args[0], a1 = args[1], a2 = args[2];
        tcg_out_insn(s, RRE, LRVR, a0, a1);
        if (a2 & TCG_BSWAP_OS) {
            tgen_ext32s(s, a0, a0);
        } else if ((a2 & (TCG_BSWAP_IZ | TCG_BSWAP_OZ)) == TCG_BSWAP_OZ) {
            tgen_ext32u(s, a0, a0);
        }
        break;

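    /*
     * Double-word add/sub: the low halves are combined first with a
     * logical add/subtract that sets the carry/borrow in the condition
     * code, then ALCR/SLBR fold that carry or borrow into the high halves.
     */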
    case INDEX_op_add2_i32:
        if (const_args[4]) {
            tcg_out_insn(s, RIL, ALFI, args[0], args[4]);
        } else {
            tcg_out_insn(s, RR, ALR, args[0], args[4]);
        }
        tcg_out_insn(s, RRE, ALCR, args[1], args[5]);
        break;
    case INDEX_op_sub2_i32:
        if (const_args[4]) {
            tcg_out_insn(s, RIL, SLFI, args[0], args[4]);
        } else {
            tcg_out_insn(s, RR, SLR, args[0], args[4]);
        }
        tcg_out_insn(s, RRE, SLBR, args[1], args[5]);
        break;

    case INDEX_op_br:
        tgen_branch(s, S390_CC_ALWAYS, arg_label(args[0]));
        break;

    case INDEX_op_brcond_i32:
        tgen_brcond(s, TCG_TYPE_I32, args[2], args[0],
                    args[1], const_args[1], arg_label(args[3]));
        break;
    case INDEX_op_setcond_i32:
        tgen_setcond(s, TCG_TYPE_I32, args[3], args[0], args[1],
                     args[2], const_args[2]);
        break;
    case INDEX_op_movcond_i32:
        tgen_movcond(s, TCG_TYPE_I32, args[5], args[0], args[1],
                     args[2], const_args[2], args[3], const_args[3]);
        break;

    case INDEX_op_qemu_ld_i32:
        /* ??? Technically we can use a non-extending instruction.  */
    case INDEX_op_qemu_ld_i64:
        tcg_out_qemu_ld(s, args[0], args[1], args[2]);
        break;
    case INDEX_op_qemu_st_i32:
    case INDEX_op_qemu_st_i64:
        tcg_out_qemu_st(s, args[0], args[1], args[2]);
        break;

    case INDEX_op_ld16s_i64:
        tcg_out_mem(s, 0, RXY_LGH, args[0], args[1], TCG_REG_NONE, args[2]);
        break;
    case INDEX_op_ld32u_i64:
        tcg_out_mem(s, 0, RXY_LLGF, args[0], args[1], TCG_REG_NONE, args[2]);
        break;
    case INDEX_op_ld32s_i64:
        tcg_out_mem(s, 0, RXY_LGF, args[0], args[1], TCG_REG_NONE, args[2]);
        break;
    case INDEX_op_ld_i64:
        tcg_out_ld(s, TCG_TYPE_I64, args[0], args[1], args[2]);
        break;

    case INDEX_op_st32_i64:
        tcg_out_st(s, TCG_TYPE_I32, args[0], args[1], args[2]);
        break;
    case INDEX_op_st_i64:
        tcg_out_st(s, TCG_TYPE_I64, args[0], args[1], args[2]);
        break;

    case INDEX_op_add_i64:
        a0 = args[0], a1 = args[1], a2 = args[2];
        if (const_args[2]) {
        do_addi_64:
            if (a0 == a1) {
                if (a2 == (int16_t)a2) {
                    tcg_out_insn(s, RI, AGHI, a0, a2);
                    break;
                }
                if (HAVE_FACILITY(EXT_IMM)) {
                    if (a2 == (int32_t)a2) {
                        tcg_out_insn(s, RIL, AGFI, a0, a2);
                        break;
                    } else if (a2 == (uint32_t)a2) {
                        tcg_out_insn(s, RIL, ALGFI, a0, a2);
                        break;
                    } else if (-a2 == (uint32_t)-a2) {
                        tcg_out_insn(s, RIL, SLGFI, a0, -a2);
                        break;
                    }
                }
            }
            tcg_out_mem(s, RX_LA, RXY_LAY, a0, a1, TCG_REG_NONE, a2);
        } else if (a0 == a1) {
            tcg_out_insn(s, RRE, AGR, a0, a2);
        } else {
            tcg_out_insn(s, RX, LA, a0, a1, a2, 0);
        }
        break;
    case INDEX_op_sub_i64:
        a0 = args[0], a1 = args[1], a2 = args[2];
        if (const_args[2]) {
            a2 = -a2;
            goto do_addi_64;
        } else if (a0 == a1) {
            tcg_out_insn(s, RRE, SGR, a0, a2);
        } else {
            tcg_out_insn(s, RRF, SGRK, a0, a1, a2);
        }
        break;

    case INDEX_op_and_i64:
        a0 = args[0], a1 = args[1], a2 = args[2];
        if (const_args[2]) {
            tcg_out_mov(s, TCG_TYPE_I64, a0, a1);
            tgen_andi(s, TCG_TYPE_I64, a0, a2);
        } else if (a0 == a1) {
            tcg_out_insn(s, RRE, NGR, a0, a2);
        } else {
            tcg_out_insn(s, RRF, NGRK, a0, a1, a2);
        }
        break;
    case INDEX_op_or_i64:
        a0 = args[0], a1 = args[1], a2 = args[2];
        if (const_args[2]) {
            tcg_out_mov(s, TCG_TYPE_I64, a0, a1);
            tgen_ori(s, TCG_TYPE_I64, a0, a2);
        } else if (a0 == a1) {
            tcg_out_insn(s, RRE, OGR, a0, a2);
        } else {
            tcg_out_insn(s, RRF, OGRK, a0, a1, a2);
        }
        break;
    case INDEX_op_xor_i64:
        a0 = args[0], a1 = args[1], a2 = args[2];
        if (const_args[2]) {
            tcg_out_mov(s, TCG_TYPE_I64, a0, a1);
            tgen_xori(s, TCG_TYPE_I64, a0, a2);
        } else if (a0 == a1) {
            tcg_out_insn(s, RRE, XGR, a0, a2);
        } else {
            tcg_out_insn(s, RRF, XGRK, a0, a1, a2);
        }
        break;

    case INDEX_op_neg_i64:
        tcg_out_insn(s, RRE, LCGR, args[0], args[1]);
        break;
    case INDEX_op_bswap64_i64:
        tcg_out_insn(s, RRE, LRVGR, args[0], args[1]);
        break;

    case INDEX_op_mul_i64:
        if (const_args[2]) {
            if (args[2] == (int16_t)args[2]) {
                tcg_out_insn(s, RI, MGHI, args[0], args[2]);
            } else {
                tcg_out_insn(s, RIL, MSGFI, args[0], args[2]);
            }
        } else {
            tcg_out_insn(s, RRE, MSGR, args[0], args[2]);
        }
        break;

    case INDEX_op_div2_i64:
        /* ??? We get an unnecessary sign-extension of the dividend
           into R3 with this definition, but since we do in fact always
           produce both quotient and remainder, using INDEX_op_div_i64
           instead would require jumping through even more hoops.  */
        tcg_out_insn(s, RRE, DSGR, TCG_REG_R2, args[4]);
        break;
    case INDEX_op_divu2_i64:
        tcg_out_insn(s, RRE, DLGR, TCG_REG_R2, args[4]);
        break;
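    /*
     * MLGR writes the 128-bit product to the even/odd pair R2:R3 (high
     * half in R2, low half in R3), so the outputs are pinned to that pair
     * just like the divides above.
     */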
    case INDEX_op_mulu2_i64:
        tcg_out_insn(s, RRE, MLGR, TCG_REG_R2, args[3]);
        break;

    case INDEX_op_shl_i64:
        op = RSY_SLLG;
    do_shift64:
        if (const_args[2]) {
            tcg_out_sh64(s, op, args[0], args[1], TCG_REG_NONE, args[2]);
        } else {
            tcg_out_sh64(s, op, args[0], args[1], args[2], 0);
        }
        break;
    case INDEX_op_shr_i64:
        op = RSY_SRLG;
        goto do_shift64;
    case INDEX_op_sar_i64:
        op = RSY_SRAG;
        goto do_shift64;

    case INDEX_op_rotl_i64:
        if (const_args[2]) {
            tcg_out_sh64(s, RSY_RLLG, args[0], args[1],
                         TCG_REG_NONE, args[2]);
        } else {
            tcg_out_sh64(s, RSY_RLLG, args[0], args[1], args[2], 0);
        }
        break;
    case INDEX_op_rotr_i64:
        if (const_args[2]) {
            tcg_out_sh64(s, RSY_RLLG, args[0], args[1],
                         TCG_REG_NONE, (64 - args[2]) & 63);
        } else {
            /* We can use the smaller 32-bit negate because only the
               low 6 bits are examined for the rotate.  */
            tcg_out_insn(s, RR, LCR, TCG_TMP0, args[2]);
            tcg_out_sh64(s, RSY_RLLG, args[0], args[1], TCG_TMP0, 0);
        }
        break;

    case INDEX_op_ext8s_i64:
        tgen_ext8s(s, TCG_TYPE_I64, args[0], args[1]);
        break;
    case INDEX_op_ext16s_i64:
        tgen_ext16s(s, TCG_TYPE_I64, args[0], args[1]);
        break;
    case INDEX_op_ext_i32_i64:
    case INDEX_op_ext32s_i64:
        tgen_ext32s(s, args[0], args[1]);
        break;
    case INDEX_op_ext8u_i64:
        tgen_ext8u(s, TCG_TYPE_I64, args[0], args[1]);
        break;
    case INDEX_op_ext16u_i64:
        tgen_ext16u(s, TCG_TYPE_I64, args[0], args[1]);
        break;
    case INDEX_op_extu_i32_i64:
    case INDEX_op_ext32u_i64:
        tgen_ext32u(s, args[0], args[1]);
        break;

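    /*
     * 64-bit variant of the carry/borrow chains above.  ALGFI/SLGFI only
     * accept an unsigned 32-bit immediate, so a negative constant is
     * handled by switching to the opposite operation on its negation.
     */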
    case INDEX_op_add2_i64:
        if (const_args[4]) {
            if ((int64_t)args[4] >= 0) {
                tcg_out_insn(s, RIL, ALGFI, args[0], args[4]);
            } else {
                tcg_out_insn(s, RIL, SLGFI, args[0], -args[4]);
            }
        } else {
            tcg_out_insn(s, RRE, ALGR, args[0], args[4]);
        }
        tcg_out_insn(s, RRE, ALCGR, args[1], args[5]);
        break;
    case INDEX_op_sub2_i64:
        if (const_args[4]) {
            if ((int64_t)args[4] >= 0) {
                tcg_out_insn(s, RIL, SLGFI, args[0], args[4]);
            } else {
                tcg_out_insn(s, RIL, ALGFI, args[0], -args[4]);
            }
        } else {
            tcg_out_insn(s, RRE, SLGR, args[0], args[4]);
        }
        tcg_out_insn(s, RRE, SLBGR, args[1], args[5]);
        break;

    case INDEX_op_brcond_i64:
        tgen_brcond(s, TCG_TYPE_I64, args[2], args[0],
                    args[1], const_args[1], arg_label(args[3]));
        break;
    case INDEX_op_setcond_i64:
        tgen_setcond(s, TCG_TYPE_I64, args[3], args[0], args[1],
                     args[2], const_args[2]);
        break;
    case INDEX_op_movcond_i64:
        tgen_movcond(s, TCG_TYPE_I64, args[5], args[0], args[1],
                     args[2], const_args[2], args[3], const_args[3]);
        break;

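    /*
     * Deposit is done with RISBG (rotate then insert selected bits); the
     * last argument to tgen_deposit says whether the bits outside the
     * inserted field may be zeroed (constant-zero background) or must be
     * preserved from the destination.
     */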
    OP_32_64(deposit):
        a0 = args[0], a1 = args[1], a2 = args[2];
        if (const_args[1]) {
            tgen_deposit(s, a0, a2, args[3], args[4], 1);
        } else {
            /* Since we can't support "0Z" as a constraint, we allow a1 in
               any register.  Fix things up as if a matching constraint.  */
            if (a0 != a1) {
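                /* Relies on TCG_TYPE_I32 == 0 and TCG_TYPE_I64 == 1. */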
                TCGType type = (opc == INDEX_op_deposit_i64);
                if (a0 == a2) {
                    tcg_out_mov(s, type, TCG_TMP0, a2);
                    a2 = TCG_TMP0;
                }
                tcg_out_mov(s, type, a0, a1);
            }
            tgen_deposit(s, a0, a2, args[3], args[4], 0);
        }
        break;

    OP_32_64(extract):
        tgen_extract(s, args[0], args[1], args[2], args[3]);
        break;

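    /*
     * tgen_clz builds clz on top of FLOGR (find leftmost one), which only
     * exists as a 64-bit operation; the 32-bit opcode is not provided by
     * this backend.
     */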
    case INDEX_op_clz_i64:
        tgen_clz(s, args[0], args[1], args[2], const_args[2]);
        break;

    case INDEX_op_mb:
        /* The host memory model is quite strong; we simply need to
           serialize the instruction stream.  */
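        /*
         * Only a store followed by a load can be reordered here, so a
         * barrier is emitted only when TCG_MO_ST_LD is requested.  BCR 14,0
         * is the cheaper serialization-only form provided by the
         * fast-BCR-serialization facility; otherwise the fully serializing
         * BCR 15,0 is used.
         */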
        if (args[0] & TCG_MO_ST_LD) {
            tcg_out_insn(s, RR, BCR, HAVE_FACILITY(FAST_BCR_SER) ? 14 : 15, 0);
        }
        break;

    case INDEX_op_mov_i32:  /* Always emitted via tcg_out_mov.  */
    case INDEX_op_mov_i64:
    case INDEX_op_call:     /* Always emitted via tcg_out_call.  */
    default:
        tcg_abort();
    }
}