in vm/vmcore/src/jit/compile.cpp [204:537]
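// Compile a LIL stub that wraps the native function 'func' as the
// implementation of the Java native method 'method'.  The stub pushes an
// M2nFrame, wraps reference arguments in object handles, enters the monitor
// for synchronized methods, enables GC around the native call, widens and
// unhandles the result, and rethrows any pending exception.  'nso', if
// non-null, may override the stub for specially-handled methods.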
NativeCodePtr compile_create_lil_jni_stub(Method_Handle method, void* func, NativeStubOverride nso)
{
    ASSERT_NO_INTERPRETER;
    const Class_Handle clss = method->get_class();
    bool is_static = method->is_static();
    bool is_synchronised = method->is_synchronized();
    Method_Signature_Handle msh = method_get_signature(method);
    unsigned num_args = method->get_num_args();
    Type_Info_Handle ret_tih = method_ret_type_get_type_info(msh);
    VM_Data_Type ret_type = type_info_get_type(ret_tih);
    unsigned i;

    unsigned num_ref_args = 0; // among original args; does not include the jclass for static methods
    for(i=0; i<num_args; i++)
        if (is_reference(method_args_get_type_info(msh, i))) num_ref_args++;
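    // Illustration (hypothetical method): for an instance method
    //   native String find(String key, int flags);
    // the incoming args are [this, key, flags], so num_args == 3 and
    // num_ref_args == 2 ('this' and 'key'; 'flags' is not a reference).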
    //***** Part 1: Entry, stats, override, push M2nFrame, allocate space for handles

    LilCodeStub* cs = lil_parse_code_stub("entry 0:managed:%0m;",
                                          method);
    assert(cs);

    // Increment stats (total number of calls)
#ifdef VM_STATS
    cs = lil_parse_onto_end(cs,
                            "inc [%0i:pint];",
                            &((Method*)method)->num_accesses);
    assert(cs);
#endif //VM_STATS

    // Do stub override here
    if (nso) cs = nso(cs, method);
    assert(cs);

    // Increment stats (number of nonoverridden calls)
#ifdef VM_STATS
    cs = lil_parse_onto_end(cs,
                            "inc [%0i:pint];",
                            &((Method*)method)->num_slow_accesses);
    assert(cs);
#endif

    // Push M2nFrame
    cs = lil_parse_onto_end(cs, "push_m2n %0i, %1i, handles; locals 3;",
                            method, (POINTER_SIZE_INT)FRAME_JNI);
    assert(cs);

    // Allocate space for handles
    unsigned number_of_object_handles = num_ref_args + (is_static ? 1 : 0);
    cs = oh_gen_allocate_handles(cs, number_of_object_handles, "l0", "l1");
    assert(cs);
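    // l0 now holds the base of the handle block (l1 served as a scratch
    // register): one slot per reference argument, plus one extra slot for
    // the jclass when the method is static.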
    //***** Part 2: Initialize object handles

    if (is_static) {
        void *jlc = clss->get_class_handle();
        cs = lil_parse_onto_end(cs,
                                //"ld l1,[%0i:pint];"
                                "ld l1,[%0i:ref];",
                                jlc);
        assert(cs);
        cs = oh_gen_init_handle(cs, "l0", 0, "l1", false);
        assert(cs);
    } else {
        cs = oh_gen_init_handle(cs, "l0", 0, "i0", true);
        assert(cs);
    }

    // The remaining handles are for the proper arguments (not including this).
    // Loop over the arguments, skipping the 0th argument for instance methods;
    // if an argument is a reference, generate handle-initialization code.
    unsigned hn = 1;
    for(i=(is_static?0:1); i<num_args; i++) {
        if (is_reference(method_args_get_type_info(msh, i))) {
            char buf[20];
            sprintf(buf, "i%u", i); // 'i' is unsigned, so %u rather than %d
            cs = oh_gen_init_handle(cs, "l0", hn, buf, true);
            assert(cs);
            hn++;
        }
    }
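    // Continuing the example above: handle 0 wraps 'this' (i0) and handle 1
    // wraps 'key' (i1); 'flags' (i2) needs no handle.  At this point hn
    // equals number_of_object_handles.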
    //***** Part 3: Synchronize

    if (is_synchronised) {
        if (is_static) {
            // Static synchronized: lock the class object.  VM_RT_CLASS_2_JLC
            // converts the Class to its java/lang/Class instance.
            cs = lil_parse_onto_end(cs,
                                    "out stdcall:pint:pint;"
                                    "o0=%0i;"
                                    "call %1i;"
                                    "out stdcall:pint:void;"
                                    "o0=r;"
                                    "call %2i;",
                                    clss,
                                    lil_npc_to_fp(vm_helper_get_addr(VM_RT_CLASS_2_JLC)),
                                    lil_npc_to_fp(vm_helper_get_addr(VM_RT_MONITOR_ENTER)));
            assert(cs);
        } else {
            // Instance synchronized: lock 'this' (i0).
            cs = lil_parse_onto_end(cs,
                                    "out stdcall:ref:void;"
                                    "o0=i0;"
                                    "call %0i;",
                                    lil_npc_to_fp(vm_helper_get_addr(VM_RT_MONITOR_ENTER)));
            assert(cs);
        }
    }
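    // Per the JVM spec, a synchronized native method holds the monitor (the
    // class object for static methods, 'this' otherwise) for the duration of
    // the native call; the monitor is released in Part 9.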
    //***** Call JVMTI MethodEntry
    DebugUtilsTI* ti = VM_Global_State::loader_env->TI;
    if (ti->isEnabled() &&
        ti->get_global_capability(DebugUtilsTI::TI_GC_ENABLE_METHOD_ENTRY))
    {
        cs = lil_parse_onto_end(cs,
                                "out platform:pint:void;"
                                "o0=%0i:pint;"
                                "call %1i;",
                                (jmethodID)method,
                                jvmti_process_method_entry_event);
        assert(cs);
    }
    //***** Part 4: Enable GC
    cs = lil_parse_onto_end(cs,
                            "out platform::void;"
                            "call %0i;",
                            hythread_suspend_enable);
    assert(cs);
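    // From here until Part 8 the thread is GC-safe: the stub must not touch
    // raw object references, only the handles, which GC can update in place.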
    //***** Part 5: Set up arguments

    // Set up the outputs: load the JNIEnv pointer into o0 and the address of
    // the class/this handle into o1.
    cs = lil_parse_onto_end(cs,
                            "out jni:%0j;"
                            "l1=ts;"
                            "ld o0,[l1 + %1i:pint];"
                            "o1=l0+%2i;",
                            method,
                            (POINTER_SIZE_INT)APR_OFFSETOF(VM_thread, jni_env),
                            oh_get_handle_offset(0));
    assert(cs);
    // Loop over the proper arguments, setting the rest of the outputs
    unsigned int arg_base = 1 + (is_static ? 1 : 0);
    hn = 1;
    for(i=(is_static?0:1); i<num_args; i++) {
        if (is_reference(method_args_get_type_info(msh, i))) {
            POINTER_SIZE_INT handle_offset = oh_get_handle_offset(hn);
            REFS_RUNTIME_SWITCH_IF
#ifdef REFS_RUNTIME_OR_COMPRESSED
                // Compressed references: null is represented by managed_null
                cs = lil_parse_onto_end(cs,
                                        "jc i%0i=%1i:ref,%n;"
                                        "o%2i=l0+%3i;"
                                        "j %o;"
                                        ":%g;"
                                        "o%4i=0;"
                                        ":%g;",
                                        i,
                                        VM_Global_State::loader_env->managed_null,
                                        arg_base+i, handle_offset, arg_base+i);
#endif // REFS_RUNTIME_OR_COMPRESSED
            REFS_RUNTIME_SWITCH_ELSE
#ifdef REFS_RUNTIME_OR_UNCOMPRESSED
                // Uncompressed references: null is a plain 0
                cs = lil_parse_onto_end(cs,
                                        "jc i%0i=0:ref,%n;"
                                        "o%1i=l0+%2i;"
                                        "j %o;"
                                        ":%g;"
                                        "o%3i=0;"
                                        ":%g;",
                                        i,
                                        arg_base+i, handle_offset,
                                        arg_base+i);
#endif // REFS_RUNTIME_OR_UNCOMPRESSED
            REFS_RUNTIME_SWITCH_ENDIF
            hn++;
        } else {
            cs = lil_parse_onto_end(cs, "o%0i=i%1i;", arg_base+i, i);
        }
        assert(cs);
    }
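    // Continuing the example: for the instance method above, arg_base == 1
    // and the native receives o0 = JNIEnv*, o1 = &handle(this),
    // o2 = &handle(key) (or 0 when key is null, since JNI passes null
    // references as NULL jobjects), and o3 = flags copied straight from i2.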
    //***** Part 6: Call

    cs = lil_parse_onto_end(cs,
                            "call %0i;",
                            func);
    assert(cs);
    //***** Part 7: Save return, widening if necessary

    // Narrow integer returns from the native may be unnormalized in the
    // return register; widen them according to Java semantics
    // (boolean/char are zero-extended, byte/short are sign-extended).
    switch (ret_type) {
    case VM_DATA_TYPE_VOID:
        break;
    case VM_DATA_TYPE_INT32:
        cs = lil_parse_onto_end(cs, "l1=r;");
        break;
    case VM_DATA_TYPE_BOOLEAN:
        cs = lil_parse_onto_end(cs, "l1=zx1 r;");
        break;
    case VM_DATA_TYPE_INT16:
        cs = lil_parse_onto_end(cs, "l1=sx2 r;");
        break;
    case VM_DATA_TYPE_INT8:
        cs = lil_parse_onto_end(cs, "l1=sx1 r;");
        break;
    case VM_DATA_TYPE_CHAR:
        cs = lil_parse_onto_end(cs, "l1=zx2 r;");
        break;
    default:
        cs = lil_parse_onto_end(cs, "l1=r;");
        break;
    }
    assert(cs);
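    // For instance, a native returning jboolean may leave garbage in the
    // upper bits of the return register; "zx1 r" zero-extends the low byte
    // so the managed caller sees a clean 8-bit value.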
    //***** Part 8: Disable GC
    cs = lil_parse_onto_end(cs,
                            "out platform::void;"
                            "call %0i;",
                            hythread_suspend_disable);
    assert(cs);

    // Offsets of the pending-exception fields in VM_thread
    // (computed with the classic null-pointer offsetof idiom)
    POINTER_SIZE_INT eoo = (POINTER_SIZE_INT)&((VM_thread*)0)->thread_exception.exc_object;
    POINTER_SIZE_INT eco = (POINTER_SIZE_INT)&((VM_thread*)0)->thread_exception.exc_class;
    //***** Call JVMTI MethodExit
    // o1 is JNI_TRUE if an exception is pending (the return value is then
    // invalid and passed as 0), JNI_FALSE otherwise.
    if (ti->isEnabled() &&
        ti->get_global_capability(DebugUtilsTI::TI_GC_ENABLE_METHOD_EXIT))
    {
        cs = lil_parse_onto_end(cs,
                                "out platform:pint,g1,g8:void;"
                                "l2=ts;"
                                "ld l2,[l2+%0i:ref];"
                                "jc l2!=0,_mex_exn_raised;"
                                "l2=ts;"
                                "ld l2,[l2+%1i:ref];"
                                "jc l2!=0,_mex_exn_raised;"
                                "o1=%2i:g1;"
                                "o2=l1:g8;"
                                "j _mex_exn_cont;"
                                ":_mex_exn_raised;"
                                "o1=%3i:g1;"
                                "o2=0:g8;"
                                ":_mex_exn_cont;"
                                "o0=%4i:pint;"
                                "call %5i;",
                                eoo,
                                eco,
                                (POINTER_SIZE_INT)JNI_FALSE,
                                (POINTER_SIZE_INT)JNI_TRUE,
                                (jmethodID)method,
                                jvmti_process_method_exit_event);
        assert(cs);
    }
    //***** Part 9: Synchronize (release the monitor taken in Part 3)

    if (is_synchronised) {
        if (is_static) {
            cs = lil_parse_onto_end(cs,
                                    "out stdcall:pint:pint;"
                                    "o0=%0i;"
                                    "call %1i;"
                                    "out stdcall:pint:void;"
                                    "o0=r;"
                                    "call %2i;",
                                    clss,
                                    lil_npc_to_fp(vm_helper_get_addr(VM_RT_CLASS_2_JLC)),
                                    lil_npc_to_fp(vm_helper_get_addr(VM_RT_MONITOR_EXIT)));
        } else {
            // Reload 'this' from its handle: a moving GC may have relocated
            // the object while GC was enabled, so i0 can be stale.
            cs = lil_parse_onto_end(cs,
                                    "ld l0,[l0+%0i:ref];"
                                    "out stdcall:ref:void; o0=l0; call %1i;",
                                    oh_get_handle_offset(0),
                                    lil_npc_to_fp(vm_helper_get_addr(VM_RT_MONITOR_EXIT)));
        }
        assert(cs);
    }
    //***** Part 10: Unhandle the return value if it is a reference

    if (is_reference(ret_tih)) {
        // A nonnull handle is dereferenced to get the raw reference;
        // a NULL jobject stays null.
        cs = lil_parse_onto_end(cs,
                                "jc l1=0,ret_done;"
                                "ld l1,[l1+0:ref];"
                                ":ret_done;");
#ifdef REFS_RUNTIME_OR_COMPRESSED
        REFS_RUNTIME_SWITCH_IF
            // With compressed references, null must be translated to managed_null
            cs = lil_parse_onto_end(cs,
                                    "jc l1!=0,done_translating_ret;"
                                    "l1=%0i:ref;"
                                    ":done_translating_ret;",
                                    VM_Global_State::loader_env->managed_null);
        REFS_RUNTIME_SWITCH_ENDIF
#endif // REFS_RUNTIME_OR_COMPRESSED
        assert(cs);
    }
    //***** Part 11: Rethrow a pending exception, if any

    cs = lil_parse_onto_end(cs,
                            "l0=ts;"
                            "ld l2,[l0+%0i:ref];"
                            "jc l2!=0,_exn_raised;"
                            "ld l2,[l0+%1i:ref];"
                            "jc l2=0,_no_exn;"
                            ":_exn_raised;"
                            "m2n_save_all;"
                            "out platform::void;"
                            "call.noret %2i;"
                            ":_no_exn;",
                            eoo, eco, exn_rethrow);
    assert(cs);
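    // exn_rethrow never returns (note call.noret); m2n_save_all spills the
    // register state into the M2nFrame so unwinding can proceed from here.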
    //***** Part 12: Restore the return value, pop the M2nFrame, return

    if (ret_type != VM_DATA_TYPE_VOID) {
        cs = lil_parse_onto_end(cs, "r=l1;");
        assert(cs);
    }
    cs = lil_parse_onto_end(cs,
                            "pop_m2n;"
                            "ret;");
    assert(cs);

    //***** Now generate code

    assert(lil_is_valid(cs));
    NativeCodePtr addr = LilCodeGenerator::get_platform()->compile(cs, clss->get_class_loader()->GetCodePool());

#ifndef NDEBUG
    char buf[100];
    apr_snprintf(buf, sizeof(buf)-1, "jni_stub.%s::%s", clss->get_name()->bytes,
                 method->get_name()->bytes);
    DUMP_STUB(addr, buf, lil_cs_get_code_size(cs));
#endif
#ifdef VM_STATS
    VM_Statistics::get_vm_stats().jni_stub_bytes += lil_cs_get_code_size(cs);
#endif

    lil_free_code_stub(cs);
    return addr;
} // compile_create_lil_jni_stub