in vm/jitrino/src/jet/compiler.cpp [928:1155]
/**
 * @brief Generates native instructions for the basic block starting at the
 *        given bytecode @c pc, propagating compilation state from its parent.
 *
 * @param pc        bytecode offset of the basic block head; must be present
 *                  in m_bbs
 * @param parentPC  bytecode offset of the block control came from; equal to
 *                  @c pc when the block has no processed parent (the state is
 *                  then initialized fresh, see below)
 * @param jsr_lead  head PC of the JSR subroutine currently being processed,
 *                  or NOTHING when not inside a subroutine
 * @return false if the block was already processed (in which case only JSR
 *         state propagation back to the parent is performed), true if code
 *         was generated for the block
 */
bool Compiler::comp_gen_insts(unsigned pc, unsigned parentPC,
unsigned jsr_lead)
{
assert(m_bbs.find(pc) != m_bbs.end());
BBInfo& bbinfo = m_bbs[pc];
if (bbinfo.processed) {
if (bbinfo.jsr_target) {
// we're processing JSR subroutine
if (jsr_lead != NOTHING) {
assert(jsr_lead == pc);
// Simply load the state back to the parent's
BBState* prevState = m_bbStates[parentPC];
// assert(m_jsrStates.find(jsr_lead) != m_jsrStates.end());
if(m_jsrStates.find(jsr_lead) == m_jsrStates.end()) {
// There can be a specific testcase with jsr without respective ret
// In this case we can try to continue if there is a bb_State for jsr_lead
// This is a temporary solution. HARMONY-4740 is devoted to the complete one.
assert(m_bbStates.find(jsr_lead) != m_bbStates.end());
} else {
// Copy the state recorded at the subroutine's RET back into
// the calling block's state, so code after the JSR continues
// from a consistent stack/locals model.
const BBState* jsrState = m_jsrStates[jsr_lead];
//prevState.jframe.init(&jsrState.jframe);
*prevState = *jsrState;
}
}
else {
// we have a fall through (and not through a JSR) path
// to a subroutine
// do nothing here - we only need to return the state
// back for JSR
}
}
// Already-processed blocks generate no code.
return false;
}
// State accumulated for this block's head; parentState is the state to
// inherit from, selected below.
BBState* pState = m_bbStates[pc];
BBState * parentState;
{
const BBInfo& parentBB = m_bbs[parentPC];
// If we see that parent block was a JSR subroutine, this may mean
// that the parent block ended with a JSR call, and then
// 'parentPC' of this block was substituted (see the appropriate
// code in comp_gen_code_bb()).
// So, in this case we must use the state after the JSR subroutine.
// The 'jsr_lead != parentPC' prevents from taking state from m_jsrStates
// when the parentPC is the real parent, that is in a JSR subroutine
// with several blocks.
if (parentBB.jsr_target && jsr_lead != parentPC && jsr_lead != pc) {
// There can be a specific testcase with jsr without respective ret
// In this case we can try to continue if there is a bb_State for parentPC
// This is a temporary solution. HARMONY-4740 is devoted to the complete one.
// assert(m_jsrStates.find(parentPC) != m_jsrStates.end());
if(m_jsrStates.find(parentPC) != m_jsrStates.end()) {
parentState = m_jsrStates[parentPC];
} else {
parentState = m_bbStates[parentPC];
}
}
else {
parentState = m_bbStates[parentPC];
}
}
JInst& bbhead = m_insts[pc];
if (pc != parentPC) {
// First visit with a real parent: inherit the parent's state.
if (pState != parentState) {
if (bbhead.ref_count > 1) {
// Several inbound edges: take only the mirror frame; the rest
// of the per-block state is cleared further below anyway.
pState->jframe = parentState->jframe;
}
else {
// Single predecessor: the whole state can be carried over.
*pState = *parentState;
}
}
}
else {
// No processed parent (e.g. method entry): start from a fresh frame
// sized by the method's max stack and locals.
pState->jframe.init(m_infoBlock.get_stack_max(),
m_infoBlock.get_num_locals());
}
BBState& bbstate = *pState;
// Mark processed before generation so re-entry (loops, JSR) takes the
// early-return path at the top of this function.
bbinfo.processed = true;
// Publish the current block's context into the compiler's working fields
// used by handle_inst() and the gen_* helpers.
m_jframe = &bbstate.jframe;
m_bbstate = &bbstate;
m_bbinfo = &bbinfo;
m_pc = pc;
// Remember where this block's native code begins in the code stream.
unsigned bb_ip_start = bbinfo.ipoff = m_codeStream.ipoff();
//
// If there are several execution paths merged on this basic block,
// then we can not predict many things, including, but not limited to
// type of of local variables, state of stack items, what stack depth
// was saved last, etc. So, clearing all this stuff out.
//
if (bbinfo.ehandler || bbhead.ref_count > 1) {
bbstate.clear();
}
if (is_set(DBG_CHECK_STACK)){
gen_dbg_check_bb_stack();
}
if (bbinfo.ehandler && pc == parentPC) {
// This is a 'normal' handler, unreached via fall-through so far
// (otherwise parent bb must leave stack in consistent state).
// Here, we invoke gen_save_ret() because this is how the idea of
// exception handlers works in DRL VM:
// Loosely speaking, calling 'throw_<whatever>' is like a regular
// function call. The only difference is that the return point is
// at another address, not at the next instruction - we're
// 'returning' to the proper exception handler.
//
// That's why the exception object acts like a return value - for
// example on IA32 it's in EAX.
//
SYNC_FIRST(static const CallSig cs(CCONV_MANAGED, jobj));
gen_save_ret(cs);
}
if (is_set(DBG_TRACE_CG)) {
// Trace the block header (ref count, flags, stack model) and any
// prologue code already emitted for this block.
dbg("\n");
dbg(";; ======================================================\n");
dbg(";; bb.ref.count=%d%s%s savedStackDepth=%d stackMask=0x%X %s"
" jsr_lead=%d\n",
bbhead.ref_count, bbinfo.ehandler ? " ehandler " : "",
bbinfo.jsr_target ? " #JSR# " : "",
bbstate.stack_depth, bbstate.stack_mask,
bbstate.stack_mask_valid ? "" : "*mask.invalid*", jsr_lead);
if (bb_ip_start != m_codeStream.ipoff()) {
dbg_dump_code(m_codeStream.data() + bb_ip_start,
m_codeStream.ipoff()-bb_ip_start, "bb.head");
}
dbg(";; ======================================================\n");
}
gen_bb_enter();
#ifdef _DEBUG
vcheck();
#endif
// Main loop: translate bytecode instructions one by one until the next
// instruction either falls off the bytecode or starts another basic block.
const unsigned bc_size = m_infoBlock.get_bc_size();
unsigned next_pc = bbinfo.start;
bool last = false;
do {
// read out instruction to process
m_pc = next_pc;
m_curr_inst = &m_insts[m_pc];
next_pc = m_insts[m_pc].next;
last = next_pc>=bc_size || (m_insts[next_pc].is_set(OPF_STARTS_BB));
if (last) {
bbinfo.last_pc = m_pc;
bbinfo.next_bb = next_pc;
}
// Track code-stream offsets so per-instruction native code can be
// mapped (and optionally disassembled) below.
unsigned inst_code_start = m_codeStream.ipoff();
unsigned inst_code_dump_start = inst_code_start;
if (is_set(DBG_TRACE_CG)) {
dbg_dump_state("before", &bbstate);
// print an opcode
dbg(";; %-30s\n", toStr(m_insts[m_pc], true).c_str());
}
if (is_set(DBG_TRACE_BC)) {
// Runtime trace call emitted before the instruction's own code;
// exclude it from the per-instruction dump range.
gen_dbg_rt(true, "//%s@%u", meth_fname(), m_pc);
inst_code_dump_start = m_codeStream.ipoff();
}
#ifdef JET_PROTO
if (dbg_break_pc == m_pc) {
gen_brk();
}
#endif
STATS_INC(Stats::opcodesSeen[m_insts[m_pc].opcode], 1);
// Emit native code for the current bytecode instruction.
handle_inst();
#ifdef _DEBUG
vcheck();
#endif
unsigned inst_code_end = m_codeStream.ipoff();
if (g_jvmtiMode && (inst_code_end == inst_code_dump_start)) {
// XXX, FIXME: quick fix for JVMTI testing:
// if bytecode did not produce any native code, then add a fake
// NOP, so every BC instruction has its own separate native
// address
ip(EncoderBase::nops(ip(), 1));
inst_code_end = m_codeStream.ipoff();
}
unsigned inst_code_dump_size = inst_code_end - inst_code_dump_start;
unsigned bb_off = inst_code_start - bb_ip_start;
// store a native offset inside the basic block for now,
// this will be adjusted in comp_layout_code(), by adding the BB's
// start address
for (unsigned i=m_pc; i<next_pc; i++) {
m_infoBlock.set_code_info(i, (const char *)(int_ptr)bb_off);
}
if (m_infoBlock.get_flags() & DBG_TRACE_CG) {
// disassemble the code
dbg_dump_code(m_codeStream.data() + inst_code_dump_start,
inst_code_dump_size, NULL);
}
// no one should change m_pc, it's used right after the loop to get
// the same JInst back again
//assert(jinst.pc == m_pc);
} while(!last);
bbinfo.last_pc = m_pc;
bbinfo.next_bb = next_pc;
const JInst& jinst = m_insts[m_pc];
unsigned bb_code_end = m_codeStream.ipoff();
bbinfo.code_size = bb_code_end - bb_ip_start;
//
// We just finished JSR subroutine - store its state for further use
//
if (jinst.opcode == OPCODE_RET) {
assert(jsr_lead != NOTHING);
assert(m_insts[jsr_lead].ref_count>0);
assert(m_bbs[jsr_lead].processed);
m_jsrStates[jsr_lead] = pState;
}
else if (jsr_lead != NOTHING &&
(jinst.opcode == OPCODE_ATHROW || jinst.is_set(OPF_RETURN))) {
// Subroutine ends with a throw or a method return instead of RET —
// still record the state under the lead PC (see the HARMONY-4740
// notes above about jsr without a matching ret).
assert(m_insts[jsr_lead].ref_count>0);
assert(m_bbs[jsr_lead].processed);
m_jsrStates[jsr_lead] = pState;
}
return true;
}