in opt/outliner/InstructionSequenceOutliner.cpp [944:1187]
// Scans one method for outlinable instruction sequences. The method's CFG is
// visited in "big block" chunks; every partial candidate reported back by
// explore_candidates_from is vetted (size thresholds, liveness constraints on
// defined registers, inferable & legal argument/result types, block hotness
// via block_decider) and, if acceptable, recorded in the returned
// MethodCandidates map, keyed by the normalized candidate. Per-bail-out-reason
// counters are accumulated locally and flushed into *stats at the end.
static MethodCandidates find_method_candidates(
    const Config& config,
    const RefChecker& ref_checker,
    const CanOutlineBlockDecider& block_decider,
    DexMethod* method,
    cfg::ControlFlowGraph& cfg,
    const CandidateInstructionCoresSet& recurring_cores,
    FindCandidatesStats* stats) {
  MethodCandidates candidates;
  // Liveness fixpoint analysis over the cfg, computed only if/when first
  // needed.
  Lazy<LivenessFixpointIterator> liveness_fp_iter([&cfg] {
    auto res = std::make_unique<LivenessFixpointIterator>(cfg);
    res->run({});
    return res;
  });
  // Per block, lazily builds a map from each opcode instruction to the
  // variables that are live-out at that instruction, by walking the block
  // backwards from the block's overall live-out state.
  LazyUnorderedMap<cfg::Block*,
                   std::unordered_map<IRInstruction*, LivenessDomain>>
      live_outs([&liveness_fp_iter](cfg::Block* block) {
        std::unordered_map<IRInstruction*, LivenessDomain> res;
        auto live_out = liveness_fp_iter->get_live_out_vars_at(block);
        for (auto it = block->rbegin(); it != block->rend(); ++it) {
          if (it->type != MFLOW_OPCODE) {
            continue;
          }
          res.emplace(it->insn, live_out);
          liveness_fp_iter->analyze_instruction(it->insn, &live_out);
        }
        return res;
      });
  // Variables that flow into a throw block, if any: the join of the live-in
  // states of all throw-edge targets of a block.
  LazyUnorderedMap<cfg::Block*, LivenessDomain> throw_live_out(
      [&cfg, &liveness_fp_iter](cfg::Block* block) {
        auto res = LivenessDomain::bottom();
        for (auto e : cfg.get_succ_edges_of_type(block, cfg::EDGE_THROW)) {
          res.join_with(liveness_fp_iter->get_live_in_vars_at(e->target()));
        }
        return res;
      });
  // Lazily tracks, per program point, which new-instance results have been
  // initialized (per reaching_initializeds' NewInstances mode).
  LazyReachingInitializedsEnvironments reaching_initialized_new_instances(
      [&cfg]() {
        return reaching_initializeds::get_reaching_initializeds(
            cfg, reaching_initializeds::Mode::NewInstances);
      });
  // In a constructor, additionally track whether the first load-param
  // (presumably the incoming `this` — per the FirstLoadParam mode) has been
  // initialized yet.
  OptionalReachingInitializedsEnvironments
      reaching_initialized_init_first_param;
  if (method::is_init(method)) {
    reaching_initialized_init_first_param =
        reaching_initializeds::get_reaching_initializeds(
            cfg, reaching_initializeds::Mode::FirstLoadParam);
  }
  OutlinerTypeAnalysis type_analysis(method);
  auto big_blocks = big_blocks::get_big_blocks(cfg);
  // Along big blocks, we are assigning consecutive indices to instructions.
  // We'll use this to manage ranges of instructions that need to get
  // invalidated when overlapping ranges of instructions are outlined.
  Lazy<std::unordered_map<const IRInstruction*, size_t>> insn_idxes(
      [&big_blocks]() {
        std::unordered_map<const IRInstruction*, size_t> res;
        for (auto& big_block : big_blocks) {
          for (const auto& mie : big_blocks::InstructionIterable(big_block)) {
            res.emplace(mie.insn, res.size());
          }
        }
        return res;
      });
  // Local stats: one size_t counter per entry of the STATS X-macro, all
  // zero-initialized; flushed into *stats just before returning.
  struct {
#define FOR_EACH(name) size_t name{0};
    STATS
#undef FOR_EACH
  } lstats;
  // We are visiting the instructions in this method in "big block" chunks:
  // - The big blocks cover all blocks.
  // - It is safe to do so as they all share the same throw-edges, and any
  //   outlined method invocation will be placed in the first block of the big
  //   block, with the appropriate throw edges.
  for (auto& big_block : big_blocks) {
    ExploredCallback explored_callback =
        [&](PartialCandidate& pc,
            cfg::Block* first_block,
            const big_blocks::InstructionIterator& it,
            cfg::Block* next_block) {
          // At this point, we can consider the current candidate for
          // normalization and outlining, adding it to the set of outlining
          // candidates for this method
          if (pc.insns_size < config.min_insns_size) {
            // Code is below minimum configured size
            return;
          }
          if (pc.size <= COST_INVOKE_WITHOUT_RESULT) {
            // Partial candidate is not longer than the replacement invoke
            // instruction would be
            return;
          }
          std::vector<std::pair<reg_t, IRInstruction*>> live_out_consts;
          // The (at most one) register the outlined method would have to
          // return, plus whether it is wide.
          boost::optional<std::pair<reg_t, bool>> out_reg;
          if (!pc.root.defined_regs.empty()) {
            // Liveness right after the candidate: the live-in of the
            // following block if there is one, otherwise the live-out at the
            // candidate's last instruction.
            LivenessDomain live_out;
            if (next_block) {
              live_out = liveness_fp_iter->get_live_in_vars_at(next_block);
            } else {
              live_out = live_outs[it.block()].at(it->insn);
            }
            auto first_block_throw_live_out = throw_live_out[first_block];
            // Every defined register that is live-out must be returnable:
            // not live into a throw edge, unique, and initialized.
            for (auto& p : pc.root.defined_regs) {
              if (first_block_throw_live_out.contains(p.first)) {
                TRACE(ISO, 4,
                      "[invoke sequence outliner] [bail out] Cannot return "
                      "value that's live-in to a throw edge");
                lstats.live_out_to_throw_edge++;
                return;
              }
              if (live_out.contains(p.first)) {
                always_assert(p.second.state != RegState::INCONSISTENT);
                if (out_reg) {
                  TRACE(ISO, 4,
                        "[invoke sequence outliner] [bail out] Cannot have "
                        "more than one out-reg");
                  lstats.live_out_multiple++;
                  return;
                }
                if (p.second.state == RegState::UNINITIALIZED) {
                  TRACE(ISO, 4,
                        "[invoke sequence outliner] [bail out] Cannot return "
                        "uninitialized");
                  lstats.live_out_initialized_not++;
                  return;
                }
                always_assert(p.second.state == RegState::INITIALIZED);
                out_reg = std::make_pair(p.first, p.second.wide);
              }
            }
          }
          if (out_reg && pc.size <= COST_INVOKE_WITH_RESULT) {
            // Code to outline is not longer than the replacement invoke
            // instruction would be
            return;
          }
          // Normalize into the canonical candidate form that serves as the
          // grouping key in `candidates`; all argument types (and, if there
          // is an out-reg, the result type) must be inferable and pass the
          // ref_checker.
          auto c = normalize(type_analysis, pc, out_reg);
          if (std::find(c.arg_types.begin(), c.arg_types.end(), nullptr) !=
              c.arg_types.end()) {
            TRACE(ISO, 4,
                  "[invoke sequence outliner] [bail out] Could not infer "
                  "argument type");
            lstats.arg_type_not_computed++;
            return;
          }
          if (std::find_if(c.arg_types.begin(), c.arg_types.end(),
                           [&](const DexType* t) {
                             return !ref_checker.check_type(t);
                           }) != c.arg_types.end()) {
            TRACE(ISO, 4,
                  "[invoke sequence outliner] [bail out] Illegal argument "
                  "type");
            lstats.arg_type_illegal++;
            return;
          }
          if (out_reg && c.res_type == nullptr) {
            TRACE(ISO, 4,
                  "[invoke sequence outliner] [bail out] Could not infer "
                  "result type");
            lstats.res_type_not_computed++;
            return;
          }
          if (out_reg && !ref_checker.check_type(c.res_type)) {
            TRACE(ISO, 4,
                  "[invoke sequence outliner] [bail out] Illegal result type");
            lstats.res_type_illegal++;
            return;
          }
          auto result = block_decider.can_outline_from_big_block(big_block);
          if (result != CanOutlineBlockDecider::Result::CanOutline) {
            // We could bail out on this way earlier, but doing it last gives us
            // better statistics on what the damage really is
            switch (result) {
            case CanOutlineBlockDecider::Result::WarmLoop:
              lstats.loop++;
              return;
            case CanOutlineBlockDecider::Result::WarmLoopExceedsThresholds:
              lstats.block_warm_loop_exceeds_thresholds++;
              return;
            case CanOutlineBlockDecider::Result::WarmLoopNoSourceBlocks:
              lstats.block_warm_loop_no_source_blocks++;
              return;
            case CanOutlineBlockDecider::Result::Hot:
              lstats.block_hot++;
              return;
            case CanOutlineBlockDecider::Result::HotExceedsThresholds:
              lstats.block_hot_exceeds_thresholds++;
              return;
            case CanOutlineBlockDecider::Result::HotNoSourceBlocks:
              lstats.block_hot_no_source_blocks++;
              return;
            default:
              not_reached();
            }
          }
          auto& cmls = candidates[c];
          // Collect the (start-index, length) instruction ranges covered by
          // this occurrence, across all nodes of the partial-candidate tree.
          std::map<size_t, size_t> ranges;
          std::function<void(const PartialCandidateNode&)> insert_ranges;
          insert_ranges = [&insert_ranges, &ranges,
                           &insn_idxes](const PartialCandidateNode& pcn) {
            if (!pcn.insns.empty()) {
              auto start = insn_idxes->at(pcn.insns.front());
              auto size = pcn.insns.size();
              ranges.emplace(start, size);
            }
            for (auto& p : pcn.succs) {
              insert_ranges(*p.second);
            }
          };
          insert_ranges(pc.root);
          // Drop this occurrence if it overlaps one already recorded for the
          // same (normalized) candidate in this method.
          if (std::find_if(cmls.begin(), cmls.end(),
                           [&ranges](const CandidateMethodLocation& cml) {
                             return ranges_overlap(cml.ranges, ranges);
                           }) != cmls.end()) {
            lstats.overlap++;
          } else {
            cmls.push_back((CandidateMethodLocation){
                pc.root.insns.front(), first_block,
                out_reg ? boost::optional<reg_t>(out_reg->first) : boost::none,
                ranges});
          }
        };
    auto ii = big_blocks::InstructionIterable(big_block);
    for (auto it = ii.begin(), end = ii.end(); it != end; it++) {
      if (opcode::is_move_result_any(it->insn->opcode())) {
        // We cannot start a sequence at a move-result-any instruction
        continue;
      }
      PartialCandidate pc;
      explore_candidates_from(reaching_initialized_new_instances,
                              reaching_initialized_init_first_param, config,
                              ref_checker, recurring_cores, &pc, &pc.root, it,
                              end, &explored_callback);
    }
  }
// Flush non-zero local counters into the caller-provided stats.
#define FOR_EACH(name) \
  if (lstats.name) stats->name += lstats.name;
  STATS
#undef FOR_EACH
  return candidates;
}