void CodeGen::gen_invoke()

in vm/jitrino/src/jet/cg_meth.cpp [764:974]
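
Emits native code for one of the invoke bytecodes (INVOKESTATIC,
INVOKESPECIAL, INVOKEVIRTUAL, INVOKEINTERFACE). After parking and locking
the registers used by the managed calling convention, the routine branches
on how the callee's address is obtained: a linkage-error stub when
resolution has failed, VM resolution helpers in lazy-resolution mode, a
vtable lookup for virtual and interface calls, or the VM-provided indirect
address for static and special calls.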


void CodeGen::gen_invoke(JavaByteCodes opcod, Method_Handle meth, unsigned short cpIndex,
                         const ::std::vector<jtype> &args, jtype retType)
{
    const unsigned slots = count_slots(args);
    // stack depth at which 'this' for the method being invoked is stored
    // (if applicable)
    const unsigned thiz_depth = slots - 1;

    const JInst& jinst = *m_curr_inst;
    
    CallSig cs(CCONV_MANAGED, retType, args);
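    // Park (spill) every value currently held in one of the registers the
    // callee's calling convention uses for arguments.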
    for (unsigned i=0; i<cs.count(); i++) {
        AR ar = cs.reg(i);
        if (ar == ar_x) continue;
        vpark(ar);
    }
    unsigned stackFix = 0;
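    // Keep the convention's argument registers locked so the register
    // allocations below do not hand them out before the call.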
    rlock(cs);
    
    const bool is_static = opcod == OPCODE_INVOKESTATIC;
    if (meth == NULL && !m_lazy_resolution) {
        runlock(cs); // was just locked above - unlock
        gen_call_throw(ci_helper_linkerr, rt_helper_throw_linking_exc, 0,
                       m_klass, jinst.op0, jinst.opcode);
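        // The throw helper never returns; the code below is still emitted,
        // presumably to keep the operand-stack and frame bookkeeping
        // consistent.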
        stackFix = gen_stack_to_args(true, cs, 0); // pop out args
        runlock(cs); // due to gen_stack_to_args()
        gen_gc_stack(-1, true);
        if (retType != jvoid) {
            gen_save_ret(cs);
        }
        if (stackFix != 0) {
            alu(alu_sub, sp, stackFix);
        }
        return;
    }
    
    if (meth == NULL) {
        // Lazy resolution mode: obtain the method address, then call it.
        assert(m_lazy_resolution);
        AR gr_ret = ar_x;
        //1. get method address
        if (opcod == OPCODE_INVOKESTATIC || opcod == OPCODE_INVOKESPECIAL) {
            SYNC_FIRST(static const CallSig cs_get_is_addr(CCONV_HELPERS, iplatf, iplatf, i32));
            rlock(cs_get_is_addr);

            if (!is_static)
            {
                Val &thiz = vstack(thiz_depth, false);
                // For INVOKESPECIAL we call through an indirect address
                // provided by the VM, so the vtable is never read. With no
                // memory access through 'this', HW null checks cannot be
                // used and an explicit check is required. Not a big loss, as
                // INVOKESPECIAL mostly comes right after NEW, which
                // guarantees non-null. In lazy resolution mode we must do
                // the check manually and hand the helper a non-null object.
                gen_check_null(thiz, false);
            }

            char* helper = opcod == OPCODE_INVOKESTATIC ?  rt_helper_get_invokestatic_addr_withresolve :
                                                           rt_helper_get_invokespecial_addr_withresolve;
            vpark(); 
            gen_call_vm(cs_get_is_addr, helper, 0, m_klass, cpIndex);
            runlock(cs_get_is_addr);
            gr_ret = cs_get_is_addr.ret_reg(0);
        } else {
            assert(opcod == OPCODE_INVOKEVIRTUAL || opcod == OPCODE_INVOKEINTERFACE);
            SYNC_FIRST(static const CallSig cs_get_iv_addr(CCONV_HELPERS, iplatf, iplatf, i32, jobj));
            rlock(cs_get_iv_addr);

            Val &thiz = vstack(thiz_depth, false);
            gen_check_null(thiz, false);

            char * helper = opcod == OPCODE_INVOKEVIRTUAL ? rt_helper_get_invokevirtual_addr_withresolve : 
                                                            rt_helper_get_invokeinterface_addr_withresolve;
            // Set up the constant parameters first.
            Val vclass(iplatf, m_klass);
            Val vcpIdx(cpIndex);
            vpark();
            gen_args(cs_get_iv_addr, 0, &vclass, &vcpIdx, &thiz);
            gen_call_vm(cs_get_iv_addr, helper, 3);
            runlock(cs_get_iv_addr);
            gr_ret = cs_get_iv_addr.ret_reg(0);
        } 
        rlock(gr_ret); //WARN: call addr is in gr_ret -> lock it

        //2. Call the Java method.
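        // gen_stack_to_args() moves the operand-stack items into the
        // callee's argument registers/slots and returns a stack-pointer
        // adjustment to be compensated after the call.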
        stackFix = gen_stack_to_args(true, cs, 0);
        vpark();
        gen_gc_stack(-1, true);
        
        AR gr = valloc(iplatf);
        ld(jobj, gr, gr_ret); // dereference the cell to get the entry point
        call(gr, cs, is_set(DBG_CHECK_STACK));
        runlock(gr_ret);
    } 
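    // Resolved INVOKEINTERFACE: ask the VM for the vtable to dispatch
    // through, then call the method's slot in it.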
    else if (opcod == OPCODE_INVOKEINTERFACE) {
        // For INVOKEINTERFACE, first obtain the vtable to call through
        Class_Handle klass = method_get_class(meth);
        const CallSig cs_vtbl(CCONV_HELPERS, iplatf, jobj, jobj);
        rlock(cs_vtbl);

        Val &thiz = vstack(thiz_depth, true);
        rlock(thiz);
        gen_check_null(thiz, true);

        // Prepare args for the get_vtable helper
        if (cs_vtbl.reg(0) == gr_x) {
            assert(cs_vtbl.size() != 0);
            alu(alu_sub, sp, cs_vtbl.size());
            st(jobj, thiz.reg(), sp, cs_vtbl.off(0));
        }
        else {
            if (cs_vtbl.size() != 0) {
                assert(cs_vtbl.caller_pops());
                alu(alu_sub, sp, cs_vtbl.size());                    
            }
            mov(cs_vtbl.get(0), thiz.as_opnd());
        }
        runlock(thiz);
        gen_call_vm(cs_vtbl, rt_helper_get_vtable, 1, klass);
        AR gr_ret = cs_vtbl.ret_reg(0);
        runlock(cs_vtbl);
        //
        // Method's vtable is in gr_ret now, prepare stack
        //
        rlock(gr_ret);
        stackFix = gen_stack_to_args(true, cs, 0);
        vpark();
        gen_gc_stack(-1, true);
        unsigned offset = method_get_vtable_offset(meth);
        runlock(gr_ret);
        ld(jobj, gr_ret, gr_ret, offset);
        call(gr_ret, cs, is_set(DBG_CHECK_STACK));
    }
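    // Resolved INVOKEVIRTUAL: classic vtable dispatch - read the vtable
    // pointer from the object header and call through the method's slot.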
    else if (opcod == OPCODE_INVOKEVIRTUAL) {
        Val &thiz = vstack(thiz_depth, true);
        rlock(thiz);

        stackFix = gen_stack_to_args(true, cs, 0);
        vpark();
        gen_gc_stack(-1, true);
        // Check for null here - we just spilled all the args and parked
        // all the registers, so we have a chance to use the HW NPE check.
        gen_check_null(thiz, true);

        AR gr = valloc(jobj);
        size_t offset = method_get_vtable_offset(meth);
        Opnd ptr;
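        // With compressed vtables (g_vtbl_squeeze) the object header holds
        // a 32-bit offset from VTBL_BASE instead of a full pointer, so the
        // base must be added back before the slot can be read.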

        if (g_vtbl_squeeze) {
            ld4(gr, thiz.reg(), rt_vtable_offset);
            AR gr_vtbase = valloc(jobj);
            movp(gr_vtbase, (char*)VTBL_BASE+offset);
            alu(jobj, alu_add, gr, gr_vtbase);
            ptr = Opnd(jobj, gr, 0);
        }
        else {
            ld(jobj, gr, thiz.reg(), rt_vtable_offset);
            ptr = Opnd(jobj, gr, (int)offset);
        }
        call(ptr, cs, is_set(DBG_CHECK_STACK));
        runlock(thiz);
    }
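    // Resolved INVOKESTATIC/INVOKESPECIAL: call through the indirection
    // cell provided by the VM.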
    else {
        Val *thiz = NULL;

        if (!is_static)
            thiz = &vstack(thiz_depth, true);

        stackFix = gen_stack_to_args(true, cs, 0);
        vpark();
        gen_gc_stack(-1, true);

        if (!is_static)
            // For INVOKESPECIAL we call through an indirect address
            // provided by the VM, so the vtable is never read. With no
            // memory access through 'this', HW null checks cannot be used
            // and an explicit check is required. Not a big loss, as
            // INVOKESPECIAL mostly comes right after NEW, which guarantees
            // non-null.
            gen_check_null(*thiz, false);

        void * paddr = method_get_indirect_address(meth);
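        // On IA32 the call can address the indirection cell directly as a
        // memory operand; other targets first materialize the cell's
        // address in a register and load the entry point from it.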
#ifdef _IA32_
        Opnd ptr(jobj, ar_x, paddr);
#else
        AR gr = valloc(jobj);
        movp(gr, paddr);
        ld(jobj, gr, gr);
        Opnd ptr(jobj, gr);
#endif
        call(ptr, cs, is_set(DBG_CHECK_STACK));
    }
    
    // Unlock the lock acquired by gen_stack_to_args()...
    runlock(cs);
    // ...and the explicit lock taken at the top of this method.
    runlock(cs);
    
    if (retType != jvoid) {
        gen_save_ret(cs);
    }
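    // Undo the stack-pointer adjustment reported by gen_stack_to_args().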
    if (stackFix != 0) {
        alu(alu_sub, sp, stackFix);
    }
}