in vm/jitrino/src/jet/compiler.cpp [109:579]
/**
 * @brief Compiles a single Java method with the JET baseline compiler.
 *
 * Drives the whole per-method pipeline: option/flag processing, debug
 * accept/reject filtering, bytecode decode (phase 1), code generation
 * (phase 2), then - under the global compile lock - code buffer
 * allocation, layout, info-block registration, address patching and
 * exception-handler registration in the VM.
 *
 * @param ch      compilation handle supplied by the VM for this request
 * @param method  handle of the (non-abstract, non-native) method to compile
 * @param params  execution parameters (JVMTI/debug capabilities etc.)
 * @return JIT_SUCCESS on successful registration (or when another thread
 *         already compiled the method); JIT_FAILURE when the method is
 *         filtered out, an exception-handler class cannot be resolved,
 *         or in emulation mode (so the VM does not use the fake code).
 */
JIT_Result Compiler::compile(Compile_Handle ch, Method_Handle method,
const OpenMethodExecutionParams& params)
{
compilation_params = params;
// Disabled experiment: force-enable JVMTI-style capabilities.
/*
compilation_params.exe_restore_context_after_unwind = true;
compilation_params.exe_provide_access_to_this = true;
//vm_properties_set_value("vm.jvmti.enabled", "true");
g_jvmtiMode = true;
*/
m_compileHandle = ch;
m_infoBlock.init(method);
// Currently use bp-based frame
m_base = bp;
// Will be used later, with sp-based frame
m_depth = 0;
//
// Check contract with VM
//
assert(!method_is_abstract(method) &&
"VM must not try to compile abstract method!");
assert(!method_is_native(method) &&
"VM must not try to compile native method!");
UNSAFE_REGION_START
// Non-atomic increment of compiled method counter.
// May affect accuracy of JIT logging or a special debug mode
// when a user specifies which range of methods accept/reject from compilation.
// Can't affect default JET execution mode.
STATS_SET_NAME_FILER(NULL);
m_methID = ++methodsSeen;
UNSAFE_REGION_END
unsigned compile_flags = defaultFlags;
initProfilingData(&compile_flags);
//
// the latest PMF machinery seems working without much overhead,
// let's try to have tracing functionality on by default
//
#if 1 //def JET_PROTO
// Ensure no memory problems exist on entrance
if (get_bool_arg("checkmem", false)) {
dbg_check_mem();
}
const char * lp;
//
// Process args, update flags if necessary
//
// 'bbp' (back-branch polling) is on by default; clearing the flag
// disables GC polling code on loop back edges.
if (!get_bool_arg("bbp", true)) {
compile_flags &= ~JMF_BBPOLLING;
}
m_lazy_resolution = get_bool_arg("lazyResolution", true);
#ifdef _DEBUG
bool assertOnRecursion = get_bool_arg("assertOnRecursion", false);
if (assertOnRecursion) {
assert(Jitrino::getCompilationRecursionLevel() == 1);
}
#endif
//
// Debugging support
//
// The 'log' argument is a comma-ish string scanned with strstr();
// 'ct' enables compile-time tracing categories, the others are
// runtime-trace categories gated by Log::log_rt().
lp = get_arg("log", NULL);
if (lp != NULL) {
if (NULL != strstr(lp, "ct")) {
bool ct = false;
if (NULL != strstr(lp, "sum")) {
compile_flags |= DBG_TRACE_SUMM;
ct = true;
}
static const unsigned TRACE_CG =
DBG_DUMP_BBS | DBG_TRACE_CG |
DBG_TRACE_SUMM | DBG_DUMP_CODE;
if (NULL != strstr(lp, "cg")) {
compile_flags |= TRACE_CG;
ct = true;
}
if (NULL != strstr(lp, "layout")) {
compile_flags |= DBG_TRACE_LAYOUT;
ct = true;
}
if (NULL != strstr(lp, "code")) {
compile_flags |= DBG_DUMP_CODE;
ct = true;
}
if (!ct) {
// No category means 'code+sum'
compile_flags |= DBG_DUMP_CODE|DBG_TRACE_SUMM;
}
}
if (Log::log_rt().isEnabled()) {
if (NULL != strstr(lp, "rtsupp")) {
compile_flags |= DBG_TRACE_RT;
}
if (NULL != strstr(lp, "ee")) {
compile_flags |= DBG_TRACE_EE;
}
if (NULL != strstr(lp, "bc")) {
compile_flags |= DBG_TRACE_BC;
}
}
} // ~compLS.isEnabled()
//
// Accept or reject the method ?
//
// Debug filter by method id. Accept-range conditions narrow the
// accepted window; NOTE(review): the reject conditions are independent
// (OR-like): rejectStartID alone rejects every id >= start, and
// rejectEndID alone rejects every id <= end - confirm this asymmetry
// with the accept range is intentional.
bool accept = true;
if (g_acceptStartID != NOTHING && m_methID < g_acceptStartID) {
accept = false;
}
if (g_acceptEndID != NOTHING && m_methID > g_acceptEndID) {
accept = false;
}
if (g_rejectStartID != NOTHING && m_methID >= g_rejectStartID) {
accept = false;
}
if (g_rejectEndID != NOTHING && m_methID <= g_rejectEndID) {
accept = false;
}
lp = get_arg("reject", NULL);
if (lp != NULL && isalpha(lp[0])) {
accept = !to_bool(lp);
}
if (!accept) {
if (compile_flags & DBG_TRACE_SUMM) {
//dbg_trace_comp_start();
//dbg_trace_comp_end(false, "Due to accept or reject argument.");
}
return JIT_FAILURE;
}
//
// A special way to accept/reject method - list of method in file.
// May be useful to find problematic method in multi threaded
// compilation env when id-s get changed much.
//
// NOTE(review): loadList/data are unsynchronized function statics;
// assumes the first compile happens effectively single-threaded (or
// that a race here is benign for this debug-only feature) - confirm.
static bool loadList = true;
static vector<string> data;
if (loadList) {
lp = get_arg("list", NULL);
if (lp != NULL) {
FILE * f = fopen(lp, "r");
if (f != NULL) {
char buf[1024*4];
while(NULL != fgets(buf, sizeof(buf)-1, f)) {
// Skip comments
if (buf[0] == '#') continue;
int len = (int)strlen(buf);
// Trim CRLF
if (len>=1 && buf[len-1]<=' ') { buf[len-1] = 0; }
if (len>=2 && buf[len-2]<=' ') { buf[len-2] = 0; }
data.push_back(buf);
}
fclose(f);
} // if f != NULL
else {
// NOTE(review): '\b' here looks like a typo for '\n' - confirm.
dbg("WARN: list option - can not open '%s'.\b", lp);
}
}
loadList = false;
}
// Empty list means "accept everything".
bool found = data.size() == 0;
for (unsigned i=0; i<data.size(); i++) {
const string& s = data[i];
if (!strcmp(meth_fname(), s.c_str())) {
found = true;
break;
}
}
if (!found) {
if (compile_flags & DBG_TRACE_SUMM) {
//dbg_trace_comp_start();
//dbg_trace_comp_end(false, "Not in file list.");
}
return JIT_FAILURE;
}
//
// Only emulate compilation, without registering code in the VM ?
//
m_bEmulation = get_bool_arg("emulate", false);
//
// Check stack integrity ?
//
if (get_bool_arg("checkstack", false)) {
compile_flags |= DBG_CHECK_STACK;
}
//
// Insert software breakpoint at specified place of method ?
//
dbg_break_pc = NOTHING;
lp = get_arg("brk", NULL);
if (lp == NULL) { lp = get_arg("break", NULL); };
if (lp != NULL) {
// PC specified where to insert breakpoint into ...
if (isdigit(lp[0])) {
dbg_break_pc = atoi(lp);
}
else {
// no PC specified - break at entrance.
compile_flags |= DBG_BRK;
}
}
#endif // ~JET_PROTO
if (compile_flags & DBG_TRACE_SUMM) {
dbg_trace_comp_start();
}
Encoder::m_trace = (compile_flags & DBG_TRACE_CG);
if (NULL == rt_helper_throw) {
// The very first call of ::compile(), initialize
// runtime constants - addresses of helpers, offsets, etc
initStatics();
}
m_max_native_stack_depth = 0;
// Query method shape from the VM: bytecode, locals, operand stack.
m_bc = method_get_bytecode(m_method);
unsigned bc_size = (unsigned)method_get_bytecode_length(m_method);
unsigned num_locals = method_get_max_locals(m_method);
// Reserve extra stack slots for synthetic exception throwing code.
unsigned max_stack = method_get_max_stack(m_method) + NATIVE_STACK_SIZE_2_THROW_SYN_EXC;
// Input arguments
::std::vector<jtype> inargs;
get_args_info(m_method, inargs, &m_retType);
m_ci.init(CCONV_MANAGED, inargs);
unsigned num_input_slots = count_slots(inargs);
m_argSlots = num_input_slots;
m_argids.resize(num_input_slots); //[in_slots];
m_ra.resize(num_locals, ar_x);
//
// Code stream capacity is a heuristic multiple of the bytecode size.
m_codeStream.init((unsigned)(bc_size*NATIVE_CODE_SIZE_2_BC_SIZE_RATIO));
m_stack.init(num_locals, max_stack, num_input_slots);
// We need to report 'this' additionally for the following cases:
// - non-static sync methods - to allow VM to call monitor_exit() for
// abrupt exit
// - constructors of classes with class_is_throwable == true
// to allow correct handling of stack trace in VM (see
// stack_trace.cpp + com_openintel_drl_vm_VMStack.cpp:
// Java_com_openintel_drl_vm_VMStack_getStackState.
//
if (!meth_is_static() && (meth_is_sync() || meth_is_exc_ctor())) {
compile_flags |= JMF_REPORT_THIS;
}
// Always report 'this' if we're asked about this explicitly
if (compilation_params.exe_provide_access_to_this && !meth_is_static()) {
compile_flags |= JMF_REPORT_THIS;
}
m_infoBlock.init(bc_size, max_stack, num_locals, num_input_slots,
compile_flags);
m_infoBlock.set_compile_params(compilation_params);
bool eh_ok = comp_resolve_ehandlers();
if (!eh_ok) {
// At least on of the exception handlers classes was not resolved:
// unable to resolve class of Exception => will be unable to register
// exception handlers => can't generate code et all => stop here
// TODO - might want to [re]consider and may generate LinkageError
// and throw it at runtime.
if (is_set(DBG_TRACE_SUMM)) {
dbg_trace_comp_end(false, "ehandler.resolve");
}
m_infoBlock.release();
return JIT_FAILURE;
}
//
// Initialization done, collect statistics
//
UNSAFE_REGION_START
STATS_INC(Stats::methodsCompiled, 1);
STATS_INC(Stats::methodsWOCatchHandlers, m_handlers.size() ? 0 : 1);
//
STATS_MEASURE_MIN_MAX_VALUE(bc_size, m_infoBlock.get_bc_size(), meth_fname());
STATS_MEASURE_MIN_MAX_VALUE(jstack, max_stack, meth_fname());
STATS_MEASURE_MIN_MAX_VALUE(locals, num_locals, meth_fname());
UNSAFE_REGION_END
//
// ~Stats
//
m_insts.alloc(bc_size, true);
//
// Initialization ends,
// Phase 1 - decoding instructions, finding basic blocks.
//
comp_parse_bytecode();
comp_alloc_regs();
// Statistics:: number of basic blocks
STATS_MEASURE_MIN_MAX_VALUE(bbs, (unsigned)m_bbs.size(), meth_fname());
if (is_set(DBG_DUMP_BBS)) {
dbg_dump_bbs();
}
//
// Phase 2 - code generation.
//
// One BBState per basic block, allocated contiguously and indexed
// by the block's leader PC via m_bbStates.
SmartPtr<BBState> allStates;
allStates.alloc((unsigned)m_bbs.size());
unsigned c = 0;
for (BBMAP::iterator i=m_bbs.begin(); i != m_bbs.end(); i++, c++) {
m_bbStates[i->first] = &allStates[c];
}
// Generate the whole code - will recursively generate all the
// reachable code, except the exception handlers ...
comp_gen_code_bb(0);
// ... now, generate exception handlers ...
for (unsigned i=0; i<m_handlers.size(); i++) {
comp_gen_code_bb(m_handlers[i].handler);
if (isSOEHandler(m_handlers[i].klass)) {
hasSOEHandlers=true;
}
}
// ... and finally, generate prolog.
// Fake BB data - it's used in gen_prolog()
BBInfo bbinfo;
unsigned prolog_ipoff = bbinfo.ipoff = ipoff();
BBState bbstate;
bbstate.jframe.init(m_infoBlock.get_stack_max(),
m_infoBlock.get_num_locals());
m_bbstate = &bbstate;
m_bbinfo = &bbinfo;
m_jframe = &bbstate.jframe;
rclear();
gen_prolog();
unsigned prolog_size = ipoff() - prolog_ipoff;
if (is_set(DBG_TRACE_CG)) {
dbg_dump_code(m_codeStream.data() + prolog_ipoff,
prolog_size, "prolog");
}
//
// phase 2 end.
// Register code and related info in the VM
//
// *************
// * LOCK HERE *
// *************
g_compileLock.lock();
if (method_get_code_block_size_jit(m_method, m_hjit) != 0) {
// the code generated already
// Another thread won the race - report success without
// registering anything.
STATS_INC(Stats::methodsCompiledSeveralTimes, 1);
g_compileLock.unlock(); /* Unlock here */
m_infoBlock.release();
if (get_bool_arg("checkmem", false)) {
dbg_check_mem();
}
return JIT_SUCCESS;
}
const unsigned total_code_size = m_codeStream.size();
if (m_bEmulation) {
// Emulation: private malloc'ed buffer, nothing registered in VM.
m_vmCode = (char*)malloc(total_code_size);
}
else {
m_vmCode = (char*)method_allocate_code_block(m_method, m_hjit,
total_code_size,
16/*fixme aligment*/,
CodeBlockHeatDefault,
0, CAA_Allocate);
m_infoBlock.set_code_start(m_vmCode);
m_infoBlock.set_code_len(total_code_size);
}
//
// Copy and reposition code from m_codeStream into the allocated buf.
//
comp_layout_code(prolog_ipoff, prolog_size);
STATS_MEASURE_MIN_MAX_VALUE(code_size, total_code_size, meth_fname());
STATS_MEASURE_MIN_MAX_VALUE(native_per_bc_ratio,
(m_infoBlock.get_bc_size() == 0) ?
0 : total_code_size/m_infoBlock.get_bc_size(),
meth_fname());
#ifdef _DEBUG
// At this point, the codeStream content is completely copied into the
// 'codeBlock', thus no usage of m_codeStream beyond this point.
memset(m_codeStream.data(), 0xCC, m_codeStream.size());
#endif
//
// runtime data. must be initialized before code patching
//
//register profiler counters mapping info if present
std::vector<U_32> profiler_counters_vec; //will automatically be deleted on exit from this method
if (!m_profileCountersMap.empty()) {
m_infoBlock.num_profiler_counters = (U_32)m_profileCountersMap.size();
profiler_counters_vec.resize(m_infoBlock.num_profiler_counters, 0);
m_infoBlock.profiler_counters_map = &profiler_counters_vec.front();
for (size_t i =0; i<m_profileCountersMap.size(); i++) {
ProfileCounterInfo& info = m_profileCountersMap[i];
// Rebase the counter's instruction offset onto the final
// (laid-out) code address of its basic block.
U_32 offset = ProfileCounterInfo::getInstOffset(info.offsetInfo) + (info.bb->addr - m_vmCode);
U_32 offsetInfo = ProfileCounterInfo::createOffsetInfo(ProfileCounterInfo::getInstSize(info.offsetInfo), offset);
m_infoBlock.profiler_counters_map[i]=offsetInfo;
}
}
unsigned data_size = m_infoBlock.get_total_size();
char * pdata;
if (m_bEmulation) {
pdata = (char*)malloc(data_size);
}
else {
pdata = (char*)method_allocate_info_block(m_method, m_hjit,
data_size);
}
m_infoBlock.save(pdata);
//
// Finalize addresses
//
comp_patch_code();
//
// register exception handlers
//
if (m_bEmulation) {
// no op
}
else {
comp_set_ehandlers();
}
// ***************
// * UNLOCK HERE *
// ***************
g_compileLock.unlock();
STATS_MEASURE_MIN_MAX_VALUE(patchItemsToBcSizeRatioX1000,
patch_count()*1000/bc_size, meth_fname());
if (is_set(DBG_DUMP_CODE)) {
dbg_dump_code_bc(m_vmCode, total_code_size);
}
if (is_set(DBG_TRACE_SUMM)) {
dbg_trace_comp_end(true, "ok");
}
m_infoBlock.release();
// Ensure no memory problems appeared during compilation
if (get_bool_arg("checkmem", false)) {
dbg_check_mem();
}
if (m_bEmulation) {
// Emulation mode: free the private buffers and report failure on
// purpose, presumably so the VM never runs the emulated code -
// confirm callers treat this as "not compiled" rather than error.
free(m_vmCode);
free(pdata);
return JIT_FAILURE;
}
return JIT_SUCCESS;
}