in vm/jitrino/src/jet/cg_meth.cpp [572:762]
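// Generates code for a method return: releases the monitor of a
// synchronized method, posts the JVMTI MethodExit notification when
// requested, moves the return value into the location required by the
// calling convention, and tears down the stack frame.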
void Compiler::gen_return(const CallSig& cs)
{
jtype retType = cs.ret_jt();
if (is_set(DBG_TRACE_EE)) {
gen_dbg_rt(true, "exiting : %s", meth_fname());
}
if (m_infoBlock.get_bc_size() == 1 && m_bc[0] == OPCODE_RETURN && !g_jvmtiMode) {
// empty method, nothing to do; the same check is in gen_prolog()
// TODO: verify that it is absolutely legal to bypass monitors
// on such an empty method
// FIXME: this optimization bypasses JVMTI notifications
ret(m_ci.caller_pops() ? 0 : m_ci.size());
if (retType != jvoid) {
vpop();
}
return;
}
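// A synchronized method must release its monitor on every return path.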
bool is_sync = meth_is_sync();
if (is_sync) {
unsigned stackFix = 0;
if (is_set(DBG_TRACE_CG)) {
dbg(";;>monitor_exit\n");
}
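// For a static method the monitor is the java.lang.Class instance of
// the declaring class; for an instance method it is 'this'.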
if (meth_is_static()) {
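// Convert the class handle into its java.lang.Class object and push the
// result onto the mimic stack so gen_stack_to_args() can forward it as
// the helper's argument.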
gen_call_vm(cs_jlc, rt_helper_class_2_jlc, 0, m_klass);
gen_save_ret(cs_jlc);
stackFix = gen_stack_to_args(true, cs_mon, 0);
//gen_call_vm(cs_mon, rt_helper_monitor_exit_static, 0, m_klass);
} else {
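// Pass 'this', kept in a known frame slot, as the helper's argument:
// either in the register assigned to argument 0, or in the argument's
// stack slot when no register is assigned (gr_x).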
AR gr = valloc(jobj);
if (cs_mon.reg(0) != gr_x) {
if (cs_mon.size() != 0) {
assert(cs_mon.caller_pops());
alu(alu_sub, sp, cs_mon.size());
}
vpark(cs_mon.reg(0));
ld(jobj, cs_mon.reg(0), m_base, voff(m_stack.thiz()));
}
else {
assert(cs_mon.size() != 0);
alu(alu_sub, sp, cs_mon.size());
ld(jobj, gr, m_base, voff(m_stack.thiz()));
st(jobj, gr, sp, cs_mon.off(0));
}
}
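// Both paths have placed the monitor object into the helper's argument
// position; call the VM helper to release the monitor.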
gen_call_vm(cs_mon, rt_helper_monitor_exit, 1);
if (meth_is_static()) {
runlock(cs_mon);
if (stackFix != 0) {
// pop the outgoing arguments pushed by gen_stack_to_args()
alu(alu_sub, sp, -(int)stackFix);
}
}
if (is_set(DBG_TRACE_CG)) {
dbg(";;>~monitor_exit\n");
}
}
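// Post the JVMTI MethodExit notification if an agent may have requested it.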
if (compilation_params.exe_notify_method_exit) {
// JVMTI helper takes pointer to return value and method handle
SYNC_FIRST(static const CallSig cs_ti_mexit(CCONV_HELPERS, jvoid, jobj, jobj));
// The call is a bit unusual and is processed as follows: we load the
// address of the top of the operand stack into a temporary register and
// pass it as the pointer to the return value. If the method returns
// void, we still load the address of the top of the stack.
Val retVal;
rlock(cs_ti_mexit);
Val retValPtr = Val(jobj, valloc(jobj));
rlock(retValPtr);
if (retType != jvoid) {
// Make sure the top item is in memory
vswap(0);
if (is_big(retType)) {
vswap(1);
}
const Val& s = vstack(0);
assert(s.is_mem());
lea(retValPtr.as_opnd(), s.as_opnd());
}
else {
Opnd stackTop(jobj, m_base, voff(m_stack.unused()));
lea(retValPtr.as_opnd(), stackTop);
}
runlock(retValPtr);
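// Test the VM's method-exit flag and skip the helper call entirely when
// no agent is currently listening for MethodExit events.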
AR ar = valloc(iplatf);
Opnd flag_addr(iplatf, ar);
mov(flag_addr, Opnd(iplatf, (int_ptr)rt_method_exit_flag_address));
Opnd mem(i16, ar, 0);
alu(alu_cmp, mem, Opnd(0));
unsigned br_off = br(z, 0, 0, taken);
Val vmeth(jobj, m_method);
gen_args(cs_ti_mexit, 0, &vmeth, &retValPtr);
gen_call_vm(cs_ti_mexit, rt_helper_ti_method_exit, cs_ti_mexit.count());
runlock(cs_ti_mexit);
patch(br_off, ip());
}
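// Move the return value from the mimic stack into the register(s)
// dictated by the method's calling convention.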
AR out_reg = cs.ret_reg(0);
if (is_f(retType)) {
if (out_reg == fp0) {
// On IA-32 always spill to memory first, then load into the FPU
vswap(0);
ld(retType, out_reg, m_base, vstack_off(0));
} else {
// Make sure the item is not an immediate
Val op = vstack(0, vis_imm(0));
if (!op.is_reg() || op.reg() != out_reg) {
Opnd ret(retType, out_reg);
mov(ret, op.as_opnd());
}
}
}
else if (is_big(retType)) {
#ifdef _IA32_
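// On IA-32 a 64-bit value is returned in a register pair: spill both
// halves of the operand to memory, then load each half into its return
// register.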
vswap(0);
vswap(1);
AR out_reg1 = cs.ret_reg(1);
ld4(out_reg, m_base, vstack_off(0));
ld4(out_reg1, m_base, vstack_off(1));
#else
assert(false && "Unexpected case - 'big' type on EM64T");
#endif
}
else if (retType != jvoid) {
Val& op = vstack(0);
if (!op.is_reg() || op.reg() != out_reg) {
Opnd ret(retType, out_reg);
mov(ret, op.as_opnd());
}
}
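// Under DBG_TRACE_EE, print the return value by calling dbg_trace_arg()
// with all registers preserved around the call.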
if (retType != jvoid && is_set(DBG_TRACE_EE)) {
// TODO: the same code is in gen_save_ret(); extract into a
// separate method?
push_all();
AR gtmp = gr0;
Opnd op = vstack(0, true).as_opnd();
st(jtmov(retType), op.reg(), m_base, voff(m_stack.scratch()));
ld(jobj, gtmp, m_base, voff(m_stack.scratch()));
if (cs_trace_arg.reg(0) != gr_x) {
if (cs_trace_arg.size() != 0) {
assert(cs_trace_arg.caller_pops());
alu(alu_sub, sp, cs_trace_arg.size());
}
mov(cs_trace_arg.reg(0), gtmp);
}
else {
assert(cs_trace_arg.size() != 0);
alu(alu_sub, sp, cs_trace_arg.size());
st4(gtmp, sp, cs_trace_arg.off(0));
}
Encoder::gen_args(cs_trace_arg, gtmp, 1, 2, -1, retType);
movp(gtmp, (void*)&dbg_trace_arg);
call(gtmp, cs_trace_arg, is_set(DBG_CHECK_STACK));
pop_all();
}
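// Epilogue: tear down the frame and return to the caller.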
unsigned frameSize = m_stack.size();
// Restore callee-save regs
for (unsigned i=0; i<ar_num; i++) {
AR ar = _ar(i);
if (ar==sp || !is_callee_save(ar) || !m_global_rusage.test(i)) {
continue;
}
jtype jt = is_f(ar) ? dbl64 : jobj;
// Always use sp-based addressing here - the bp frame may already have
// been destroyed by the restore of bp above.
ld(jt, ar, sp, frameSize+m_stack.spill(ar));
}
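// Release the frame; 'ret' also pops the incoming arguments unless the
// calling convention makes the caller do it.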
alu(alu_add, sp, frameSize);
ret(m_ci.caller_pops() ? 0 : m_ci.size());
//m_jframe->clear_stack();
if (retType != jvoid) {
// free up the registers held by the return value
vpop();
}
}