in tcg/sparc/tcg-target.c.inc [1287:1583]
static void tcg_out_op(TCGContext *s, TCGOpcode opc,
                       const TCGArg args[TCG_MAX_OP_ARGS],
                       const int const_args[TCG_MAX_OP_ARGS])
{
    TCGArg a0, a1, a2;
    int c, c2;

    /* Hoist the loads of the most common arguments.  */
    a0 = args[0];
    a1 = args[1];
    a2 = args[2];
    c2 = const_args[2];

    switch (opc) {
    case INDEX_op_exit_tb:
        if (check_fit_ptr(a0, 13)) {
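            /*
             * RETURN restores the caller's register window, and its
             * delay-slot insn executes with that window already in
             * place, so writing %o0 there sets the caller's return
             * value directly.
             */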
            tcg_out_arithi(s, TCG_REG_G0, TCG_REG_I7, 8, RETURN);
            tcg_out_movi_imm13(s, TCG_REG_O0, a0);
            break;
        } else if (USE_REG_TB) {
            intptr_t tb_diff = tcg_tbrel_diff(s, (void *)a0);
            if (check_fit_ptr(tb_diff, 13)) {
                tcg_out_arithi(s, TCG_REG_G0, TCG_REG_I7, 8, RETURN);
                /* Note that TCG_REG_TB has been unwound to O1.  */
                tcg_out_arithi(s, TCG_REG_O0, TCG_REG_O1, tb_diff, ARITH_ADD);
                break;
            }
        }
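        /*
         * Otherwise split the constant: load all but the low 10 bits
         * into %i0 before the window rotates (in this window, %i0 is
         * the caller's %o0), then OR in the low bits from the delay
         * slot after RETURN has restored the caller's window.
         */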
        tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_I0, a0 & ~0x3ff);
        tcg_out_arithi(s, TCG_REG_G0, TCG_REG_I7, 8, RETURN);
        tcg_out_arithi(s, TCG_REG_O0, TCG_REG_O0, a0 & 0x3ff, ARITH_OR);
        break;
    case INDEX_op_goto_tb:
        if (s->tb_jmp_insn_offset) {
            /* direct jump method */
            if (USE_REG_TB) {
                /* make sure the patch is 8-byte aligned.  */
                if ((intptr_t)s->code_ptr & 4) {
                    tcg_out_nop(s);
                }
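                /*
                 * Aligning the sethi/or pair on 8 bytes lets the jump
                 * target be repatched with one atomic 64-bit store.
                 */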
                s->tb_jmp_insn_offset[a0] = tcg_current_code_size(s);
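                /*
                 * Emit a sethi/or pair as a placeholder displacement
                 * from TCG_REG_TB, to be rewritten when the TB is
                 * linked.  The jmpl transfers control while the add in
                 * its delay slot advances TCG_REG_TB to the target.
                 */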
                tcg_out_sethi(s, TCG_REG_T1, 0);
                tcg_out_arithi(s, TCG_REG_T1, TCG_REG_T1, 0, ARITH_OR);
                tcg_out_arith(s, TCG_REG_G0, TCG_REG_TB, TCG_REG_T1, JMPL);
                tcg_out_arith(s, TCG_REG_TB, TCG_REG_TB, TCG_REG_T1, ARITH_ADD);
            } else {
                s->tb_jmp_insn_offset[a0] = tcg_current_code_size(s);
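                /*
                 * A CALL with zero displacement serves as the
                 * patchable branch; the nop fills its delay slot.
                 */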
                tcg_out32(s, CALL);
                tcg_out_nop(s);
            }
        } else {
            /* indirect jump method */
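            /* Load the target out of the per-TB jump table and jump
               through it; the nop fills the jmpl delay slot.  */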
            tcg_out_ld_ptr(s, TCG_REG_TB, s->tb_jmp_target_addr + a0);
            tcg_out_arithi(s, TCG_REG_G0, TCG_REG_TB, 0, JMPL);
            tcg_out_nop(s);
        }
        set_jmp_reset_offset(s, a0);

        /* For the unlinked path of goto_tb, we need to reset
           TCG_REG_TB to the beginning of this TB.  */
        if (USE_REG_TB) {
            c = -tcg_current_code_size(s);
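            /*
             * The negated current code size is exactly the byte offset
             * from this reset point back to the start of the TB.
             */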
            if (check_fit_i32(c, 13)) {
                tcg_out_arithi(s, TCG_REG_TB, TCG_REG_TB, c, ARITH_ADD);
            } else {
                tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_T1, c);
                tcg_out_arith(s, TCG_REG_TB, TCG_REG_TB,
                              TCG_REG_T1, ARITH_ADD);
            }
        }
        break;
    case INDEX_op_goto_ptr:
        tcg_out_arithi(s, TCG_REG_G0, a0, 0, JMPL);
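        /*
         * tcg_out_mov_delay fills the jmpl delay slot with the mov, so
         * TCG_REG_TB already holds the new TB's address on arrival.
         */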
        if (USE_REG_TB) {
            tcg_out_mov_delay(s, TCG_REG_TB, a0);
        } else {
            tcg_out_nop(s);
        }
        break;
    case INDEX_op_br:
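        /* Unconditional branch (COND_A), with a nop in the delay slot. */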
        tcg_out_bpcc(s, COND_A, BPCC_PT, arg_label(a0));
        tcg_out_nop(s);
        break;
#define OP_32_64(x)                             \
        glue(glue(case INDEX_op_, x), _i32):    \
        glue(glue(case INDEX_op_, x), _i64)
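/*
 * For example, OP_32_64(ld8u) expands to the label pair
 *     case INDEX_op_ld8u_i32:
 *     case INDEX_op_ld8u_i64:
 * so a single arm serves both operand widths.
 */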
    OP_32_64(ld8u):
        tcg_out_ldst(s, a0, a1, a2, LDUB);
        break;
    OP_32_64(ld8s):
        tcg_out_ldst(s, a0, a1, a2, LDSB);
        break;
    OP_32_64(ld16u):
        tcg_out_ldst(s, a0, a1, a2, LDUH);
        break;
    OP_32_64(ld16s):
        tcg_out_ldst(s, a0, a1, a2, LDSH);
        break;
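    /* LDUW zero-extends, which satisfies both ld_i32 and ld32u_i64. */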
    case INDEX_op_ld_i32:
    case INDEX_op_ld32u_i64:
        tcg_out_ldst(s, a0, a1, a2, LDUW);
        break;
    OP_32_64(st8):
        tcg_out_ldst(s, a0, a1, a2, STB);
        break;
    OP_32_64(st16):
        tcg_out_ldst(s, a0, a1, a2, STH);
        break;
    case INDEX_op_st_i32:
    case INDEX_op_st32_i64:
        tcg_out_ldst(s, a0, a1, a2, STW);
        break;
    OP_32_64(add):
        c = ARITH_ADD;
        goto gen_arith;
    OP_32_64(sub):
        c = ARITH_SUB;
        goto gen_arith;
    OP_32_64(and):
        c = ARITH_AND;
        goto gen_arith;
    OP_32_64(andc):
        c = ARITH_ANDN;
        goto gen_arith;
    OP_32_64(or):
        c = ARITH_OR;
        goto gen_arith;
    OP_32_64(orc):
        c = ARITH_ORN;
        goto gen_arith;
    OP_32_64(xor):
        c = ARITH_XOR;
        goto gen_arith;
    case INDEX_op_shl_i32:
        c = SHIFT_SLL;
    do_shift32:
        /* Limit immediate shift count lest we create an illegal insn.  */
        tcg_out_arithc(s, a0, a1, a2 & 31, c2, c);
        break;
    case INDEX_op_shr_i32:
        c = SHIFT_SRL;
        goto do_shift32;
    case INDEX_op_sar_i32:
        c = SHIFT_SRA;
        goto do_shift32;
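    /* The low 32 bits of UMUL and SMUL agree, so UMUL covers mul_i32. */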
    case INDEX_op_mul_i32:
        c = ARITH_UMUL;
        goto gen_arith;
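    /*
     * gen_arith1 supplies %g0 as the first operand, so neg becomes
     * 0 - a1 and not becomes 0 orn a1, i.e. ~a1.
     */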
    OP_32_64(neg):
        c = ARITH_SUB;
        goto gen_arith1;
    OP_32_64(not):
        c = ARITH_ORN;
        goto gen_arith1;
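    /*
     * tcg_out_div32 seeds the Y register with the sign or zero
     * extension of the dividend before issuing the 32-bit divide;
     * the trailing flag selects the unsigned form.
     */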
    case INDEX_op_div_i32:
        tcg_out_div32(s, a0, a1, a2, c2, 0);
        break;
    case INDEX_op_divu_i32:
        tcg_out_div32(s, a0, a1, a2, c2, 1);
        break;
    case INDEX_op_brcond_i32:
        tcg_out_brcond_i32(s, a2, a0, a1, const_args[1], arg_label(args[3]));
        break;
    case INDEX_op_setcond_i32:
        tcg_out_setcond_i32(s, args[3], a0, a1, a2, c2);
        break;
    case INDEX_op_movcond_i32:
        tcg_out_movcond_i32(s, args[5], a0, a1, a2, c2, args[3], const_args[3]);
        break;
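    /*
     * Double-word add/sub: the CC form of the low-part op sets the
     * carry that the ADDC/SUBC on the high part then consumes.
     */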
    case INDEX_op_add2_i32:
        tcg_out_addsub2_i32(s, args[0], args[1], args[2], args[3],
                            args[4], const_args[4], args[5], const_args[5],
                            ARITH_ADDCC, ARITH_ADDC);
        break;
    case INDEX_op_sub2_i32:
        tcg_out_addsub2_i32(s, args[0], args[1], args[2], args[3],
                            args[4], const_args[4], args[5], const_args[5],
                            ARITH_SUBCC, ARITH_SUBC);
        break;
    case INDEX_op_mulu2_i32:
        c = ARITH_UMUL;
        goto do_mul2;
    case INDEX_op_muls2_i32:
        c = ARITH_SMUL;
    do_mul2:
        /* The 32-bit multiply insns produce a full 64-bit result.  If the
           destination register can hold it, we can avoid the slower RDY.  */
        tcg_out_arithc(s, a0, a2, args[3], const_args[3], c);
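        /*
         * Under V8Plus only the %g and %o registers retain all 64 bits
         * across window spills, hence the a0 <= TCG_REG_O7 test; on a
         * true 64-bit host any register will do.
         */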
        if (SPARC64 || a0 <= TCG_REG_O7) {
            tcg_out_arithi(s, a1, a0, 32, SHIFT_SRLX);
        } else {
            tcg_out_rdy(s, a1);
        }
        break;
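    /*
     * For the qemu_ld/st ops, a2 carries the combined memop and mmu
     * index (TCGMemOpIdx); the boolean selects a 64-bit data value.
     */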
    case INDEX_op_qemu_ld_i32:
        tcg_out_qemu_ld(s, a0, a1, a2, false);
        break;
    case INDEX_op_qemu_ld_i64:
        tcg_out_qemu_ld(s, a0, a1, a2, true);
        break;
    case INDEX_op_qemu_st_i32:
    case INDEX_op_qemu_st_i64:
        tcg_out_qemu_st(s, a0, a1, a2);
        break;
    case INDEX_op_ld32s_i64:
        tcg_out_ldst(s, a0, a1, a2, LDSW);
        break;
    case INDEX_op_ld_i64:
        tcg_out_ldst(s, a0, a1, a2, LDX);
        break;
    case INDEX_op_st_i64:
        tcg_out_ldst(s, a0, a1, a2, STX);
        break;
    case INDEX_op_shl_i64:
        c = SHIFT_SLLX;
    do_shift64:
        /* Limit immediate shift count lest we create an illegal insn.  */
        tcg_out_arithc(s, a0, a1, a2 & 63, c2, c);
        break;
    case INDEX_op_shr_i64:
        c = SHIFT_SRLX;
        goto do_shift64;
    case INDEX_op_sar_i64:
        c = SHIFT_SRAX;
        goto do_shift64;
    case INDEX_op_mul_i64:
        c = ARITH_MULX;
        goto gen_arith;
    case INDEX_op_div_i64:
        c = ARITH_SDIVX;
        goto gen_arith;
    case INDEX_op_divu_i64:
        c = ARITH_UDIVX;
        goto gen_arith;
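    /*
     * The 32-bit shift forms with a zero count perform the extension:
     * "sra a1, 0" writes the sign extension of the low 32 bits, and
     * "srl a1, 0" writes the zero extension.
     */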
    case INDEX_op_ext_i32_i64:
    case INDEX_op_ext32s_i64:
        tcg_out_arithi(s, a0, a1, 0, SHIFT_SRA);
        break;
    case INDEX_op_extu_i32_i64:
    case INDEX_op_ext32u_i64:
        tcg_out_arithi(s, a0, a1, 0, SHIFT_SRL);
        break;
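    /*
     * extrl needs no masking, since i32 values only guarantee their
     * low 32 bits; extrh pulls the high half down with a 64-bit shift.
     */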
    case INDEX_op_extrl_i64_i32:
        tcg_out_mov(s, TCG_TYPE_I32, a0, a1);
        break;
    case INDEX_op_extrh_i64_i32:
        tcg_out_arithi(s, a0, a1, 32, SHIFT_SRLX);
        break;
    case INDEX_op_brcond_i64:
        tcg_out_brcond_i64(s, a2, a0, a1, const_args[1], arg_label(args[3]));
        break;
    case INDEX_op_setcond_i64:
        tcg_out_setcond_i64(s, args[3], a0, a1, a2, c2);
        break;
    case INDEX_op_movcond_i64:
        tcg_out_movcond_i64(s, args[5], a0, a1, a2, c2, args[3], const_args[3]);
        break;
    case INDEX_op_add2_i64:
        tcg_out_addsub2_i64(s, args[0], args[1], args[2], args[3], args[4],
                            const_args[4], args[5], const_args[5], false);
        break;
    case INDEX_op_sub2_i64:
        tcg_out_addsub2_i64(s, args[0], args[1], args[2], args[3], args[4],
                            const_args[4], args[5], const_args[5], true);
        break;
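    /* UMULXHI (VIS3) yields the high 64 bits of the unsigned product;
       this op is only advertised when the host has VIS3.  */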
    case INDEX_op_muluh_i64:
        tcg_out_arith(s, args[0], args[1], args[2], ARITH_UMULXHI);
        break;
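    /*
     * Shared tails for the two- and one-operand arithmetic cases
     * above; c holds the opcode chosen by the case label.
     */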
    gen_arith:
        tcg_out_arithc(s, a0, a1, a2, c2, c);
        break;

    gen_arith1:
        tcg_out_arithc(s, a0, TCG_REG_G0, a1, const_args[1], c);
        break;
    case INDEX_op_mb:
        tcg_out_mb(s, a0);
        break;

    case INDEX_op_mov_i32:  /* Always emitted via tcg_out_mov.  */
    case INDEX_op_mov_i64:
    case INDEX_op_call:     /* Always emitted via tcg_out_call.  */
    default:
        tcg_abort();
    }
}