in hphp/hhbbc/emit.cpp [255:704]
EmitBcInfo emit_bytecode(EmitUnitState& euState, UnitEmitter& ue, FuncEmitter& fe,
const php::WideFunc& func) {
EmitBcInfo ret = {};
auto& blockInfo = ret.blockInfo;
blockInfo.resize(func.blocks().size());
// Track the stack depth while emitting to determine maxStackDepth.
int32_t currentStackDepth { 0 };
bool traceBc = false;
SCOPE_ASSERT_DETAIL("emit") {
std::string out;
for (auto bid : func.blockRange()) {
auto const block = show(*func, *func.blocks()[bid]);
folly::format(&out, "block #{}\n{}", bid, block);
}
return out;
};
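// Locals may have been renumbered to close gaps left by killed
// locals; remap each LocalId to its final id. Out-of-range ids are
// passed through unchanged.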
auto const map_local = [&] (LocalId id) {
if (id >= func->locals.size()) return id;
auto const loc = func->locals[id];
assertx(!loc.killed);
assertx(loc.id <= id);
id = loc.id;
return id;
};
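// Remap a NamedLocal: the id is remapped like any other local, and
// the name is dropped if the named local's name was removed, or
// remapped to its final name id otherwise.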
auto const map_local_name = [&] (NamedLocal nl) {
nl.id = map_local(nl.id);
if (nl.name == kInvalidLocalName) return nl;
if (nl.name >= func->locals.size()) return nl;
auto const loc = func->locals[nl.name];
if (!loc.name) {
nl.name = kInvalidLocalName;
return nl;
}
assertx(!loc.unusedName);
nl.name = loc.nameId;
return nl;
};
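// Record the stack depth at which a block will be entered, or assert
// that it matches the depth recorded earlier.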
auto const set_expected_depth = [&] (BlockId block) {
auto& info = blockInfo[block];
if (info.expectedStackDepth) {
assertx(*info.expectedStackDepth == currentStackDepth);
} else {
info.expectedStackDepth = currentStackDepth;
}
};
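// Translate HHBBC's MKey into the runtime MemberKey, remapping any
// local the key refers to.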
auto const make_member_key = [&] (MKey mkey) {
switch (mkey.mcode) {
case MEC: case MPC:
return MemberKey{mkey.mcode, static_cast<int32_t>(mkey.idx), mkey.rop};
case MEL: case MPL:
return MemberKey{mkey.mcode, map_local_name(mkey.local), mkey.rop};
case MET: case MPT: case MQT:
return MemberKey{mkey.mcode, mkey.litstr, mkey.rop};
case MEI:
return MemberKey{mkey.mcode, mkey.int64, mkey.rop};
case MW:
return MemberKey{};
}
not_reached();
};
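// Emit a single bytecode: the opcode itself, its immediates, stack
// bookkeeping, branch fixups, and the source location.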
auto const emit_inst = [&] (const Bytecode& inst) {
auto const startOffset = fe.bcPos();
FTRACE(4, " emit: {} -- {} @ {}\n", currentStackDepth, show(func, inst),
show(srcLoc(*func, inst.srcLoc)));
if (options.TraceBytecodes.count(inst.op)) traceBc = true;
auto const emit_vsa = [&] (const CompactVector<LSString>& keys) {
auto n = keys.size();
fe.emitIVA(n);
for (size_t i = 0; i < n; ++i) {
fe.emitInt32(ue.mergeLitstr(keys[i]));
}
};
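// Emit a branch immediate as an offset relative to the start of this
// instruction. Forward branches emit a placeholder and record a
// fixup to patch once the target block's offset is known.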
auto const emit_branch = [&] (BlockId id) {
auto& info = blockInfo[id];
if (info.offset != kInvalidOffset) {
fe.emitInt32(info.offset - startOffset);
} else {
info.forwardJumps.push_back({ startOffset, fe.bcPos() });
fe.emitInt32(0);
}
};
auto const emit_switch = [&] (const SwitchTab& targets) {
fe.emitIVA(targets.size());
for (auto t : targets) {
set_expected_depth(t);
emit_branch(t);
}
};
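// A string switch is emitted as (litstr id, target) pairs, followed
// by a -1 marker and the default target.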
auto const emit_sswitch = [&] (const SSwitchTab& targets) {
fe.emitIVA(targets.size());
for (size_t i = 0; i < targets.size() - 1; ++i) {
set_expected_depth(targets[i].second);
fe.emitInt32(ue.mergeLitstr(targets[i].first));
emit_branch(targets[i].second);
}
fe.emitInt32(-1);
set_expected_depth(targets.back().second);
emit_branch(targets.back().second);
};
auto const emit_srcloc = [&] {
auto const sl = srcLoc(*func, inst.srcLoc);
auto const loc = sl.isValid() ?
Location::Range(sl.start.line, sl.start.col, sl.past.line, sl.past.col)
: Location::Range(-1,-1,-1,-1);
fe.recordSourceLocation(loc, startOffset);
};
auto const pop = [&] (int32_t n) {
currentStackDepth -= n;
assertx(currentStackDepth >= 0);
};
auto const push = [&] (int32_t n) {
currentStackDepth += n;
ret.maxStackDepth =
std::max<uint32_t>(ret.maxStackDepth, currentStackDepth);
};
auto const ret_assert = [&] { assertx(currentStackDepth == inst.numPop()); };
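// CreateCl's class-id immediate: record the closure class the first
// time it's seen and rewrite the immediate to the emitted class's
// id; later occurrences reuse the previously assigned id.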
auto const createcl = [&] (auto& data) {
auto& id = data.arg2;
if (euState.classOffsets[id] != kInvalidOffset) {
for (auto const& elm : euState.pceInfo) {
if (elm.origId == id) {
id = elm.pce->id();
return;
}
}
always_assert(false);
}
euState.classOffsets[id] = startOffset;
id = recordClass(euState, ue, id);
};
auto const emit_lar = [&](const LocalRange& range) {
encodeLocalRange(fe, HPHP::LocalRange{
map_local(range.first), range.count
});
};
auto const emit_ita = [&](IterArgs ita) {
if (ita.hasKey()) ita.keyId = map_local(ita.keyId);
ita.valId = map_local(ita.valId);
encodeIterArgs(fe, ita);
};
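// One IMM_* macro per immediate type; the O() table below expands
// them to serialize each opcode's immediates in order.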
#define IMM_BLA(n) emit_switch(data.targets);
#define IMM_SLA(n) emit_sswitch(data.targets);
#define IMM_IVA(n) fe.emitIVA(data.arg##n);
#define IMM_I64A(n) fe.emitInt64(data.arg##n);
#define IMM_LA(n) fe.emitIVA(map_local(data.loc##n));
#define IMM_NLA(n) fe.emitNamedLocal(map_local_name(data.nloc##n));
#define IMM_ILA(n) fe.emitIVA(map_local(data.loc##n));
#define IMM_IA(n) fe.emitIVA(data.iter##n);
#define IMM_DA(n) fe.emitDouble(data.dbl##n);
#define IMM_SA(n) fe.emitInt32(ue.mergeLitstr(data.str##n));
#define IMM_RATA(n) encodeRAT(fe, data.rat);
#define IMM_AA(n) fe.emitInt32(ue.mergeArray(data.arr##n));
#define IMM_OA_IMPL(n) fe.emitByte(static_cast<uint8_t>(data.subop##n));
#define IMM_OA(type) IMM_OA_IMPL
#define IMM_BA(n) targets[numTargets++] = data.target##n; \
emit_branch(data.target##n);
#define IMM_VSA(n) emit_vsa(data.keys);
#define IMM_KA(n) encode_member_key(make_member_key(data.mkey), fe);
#define IMM_LAR(n) emit_lar(data.locrange);
#define IMM_ITA(n) emit_ita(data.ita);
#define IMM_FCA(n) encodeFCallArgs( \
fe, data.fca.base(), \
data.fca.enforceInOut(), \
[&] { \
data.fca.applyIO( \
[&] (int numBytes, const uint8_t* inOut) { \
encodeFCallArgsBoolVec(fe, numBytes, inOut); \
} \
); \
}, \
data.fca.enforceReadonly(), \
[&] { \
data.fca.applyReadonly( \
[&] (int numBytes, const uint8_t* readonly) { \
encodeFCallArgsBoolVec(fe, numBytes, readonly); \
} \
); \
}, \
data.fca.asyncEagerTarget() != NoBlockId, \
[&] { \
set_expected_depth(data.fca.asyncEagerTarget()); \
emit_branch(data.fca.asyncEagerTarget()); \
}, \
data.fca.context() != nullptr, \
[&] { \
fe.emitInt32(ue.mergeLitstr(data.fca.context()));\
}); \
if (!data.fca.hasUnpack()) ret.containsCalls = true;
#define IMM_NA
#define IMM_ONE(x) IMM_##x(1)
#define IMM_TWO(x, y) IMM_##x(1); IMM_##y(2);
#define IMM_THREE(x, y, z) IMM_TWO(x, y); IMM_##z(3);
#define IMM_FOUR(x, y, z, n) IMM_THREE(x, y, z); IMM_##n(4);
#define IMM_FIVE(x, y, z, n, m) IMM_FOUR(x, y, z, n); IMM_##m(5);
#define IMM_SIX(x, y, z, n, m, o) IMM_FIVE(x, y, z, n, m); IMM_##o(6);
#define POP_NOV
#define POP_ONE(x) pop(1);
#define POP_TWO(x, y) pop(2);
#define POP_THREE(x, y, z) pop(3);
#define POP_MFINAL pop(data.arg1);
#define POP_C_MFINAL(n) pop(n); pop(data.arg1);
#define POP_CMANY pop(data.arg1);
#define POP_SMANY pop(data.keys.size());
#define POP_CUMANY pop(data.arg1);
#define POP_FCALL(nin, nobj) \
pop(nin + data.fca.numInputs() + 1 + data.fca.numRets());
#define PUSH_NOV
#define PUSH_ONE(x) push(1);
#define PUSH_TWO(x, y) push(2);
#define PUSH_THREE(x, y, z) push(3);
#define PUSH_CMANY push(data.arg1);
#define PUSH_FCALL push(data.fca.numRets());
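// Emit one opcode: pop its inputs, emit its immediates, push its
// outputs, and record the expected stack depth of any branch
// targets. MemoGet and MemoGetEager are special-cased because their
// targets are entered at different depths relative to the
// instruction's pushes.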
#define O(opcode, imms, inputs, outputs, flags) \
case Op::opcode: { \
if (Op::opcode == Op::Nop) break; \
OpInfo<bc::opcode> data{inst.opcode}; \
if (RuntimeOption::EnableIntrinsicsExtension) { \
if (Op::opcode == Op::FCallFuncD && \
inst.FCallFuncD.str2->isame( \
s_hhbbc_fail_verification.get())) { \
fe.emitOp(Op::CheckProp); \
fe.emitInt32( \
ue.mergeLitstr(inst.FCallFuncD.str2)); \
fe.emitOp(Op::PopC); \
ret.maxStackDepth++; \
} \
} \
caller<Op::CreateCl>(createcl, data); \
\
if (isRet(Op::opcode)) ret_assert(); \
fe.emitOp(Op::opcode); \
POP_##inputs \
\
size_t numTargets = 0; \
std::array<BlockId, kMaxHhbcImms> targets; \
\
if (Op::opcode == Op::MemoGet) { \
IMM_##imms \
assertx(numTargets == 1); \
set_expected_depth(targets[0]); \
PUSH_##outputs \
} else if (Op::opcode == Op::MemoGetEager) { \
IMM_##imms \
assertx(numTargets == 2); \
set_expected_depth(targets[0]); \
PUSH_##outputs \
set_expected_depth(targets[1]); \
} else { \
PUSH_##outputs \
IMM_##imms \
for (size_t i = 0; i < numTargets; ++i) { \
set_expected_depth(targets[i]); \
} \
} \
\
if (flags & TF) currentStackDepth = 0; \
emit_srcloc(); \
break; \
}
switch (inst.op) { OPCODES }
#undef O
#undef IMM_BLA
#undef IMM_SLA
#undef IMM_IVA
#undef IMM_I64A
#undef IMM_LA
#undef IMM_NLA
#undef IMM_ILA
#undef IMM_IA
#undef IMM_DA
#undef IMM_SA
#undef IMM_RATA
#undef IMM_AA
#undef IMM_BA
#undef IMM_OA_IMPL
#undef IMM_OA
#undef IMM_VSA
#undef IMM_KA
#undef IMM_LAR
#undef IMM_ITA
#undef IMM_FCA
#undef IMM_NA
#undef IMM_ONE
#undef IMM_TWO
#undef IMM_THREE
#undef IMM_FOUR
#undef IMM_FIVE
#undef IMM_SIX
#undef POP_NOV
#undef POP_ONE
#undef POP_TWO
#undef POP_THREE
#undef POP_CMANY
#undef POP_SMANY
#undef POP_CUMANY
#undef POP_FCALL
#undef POP_MFINAL
#undef POP_C_MFINAL
#undef PUSH_NOV
#undef PUSH_ONE
#undef PUSH_TWO
#undef PUSH_THREE
#undef PUSH_CMANY
#undef PUSH_FCALL
};
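// Emit the blocks in the order chosen by order_blocks, tracking each
// block's offset so forward jumps can be patched.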
ret.blockOrder = order_blocks(func);
auto blockIt = begin(ret.blockOrder);
auto const endBlockIt = end(ret.blockOrder);
for (; blockIt != endBlockIt; ++blockIt) {
auto bid = *blockIt;
auto& info = blockInfo[bid];
auto const b = func.blocks()[bid].get();
info.offset = fe.bcPos();
FTRACE(2, " block {}: {}\n", bid, info.offset);
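// Patch forward jumps to this block now that its offset is known.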
for (auto& fixup : info.forwardJumps) {
fe.emitInt32(info.offset - fixup.instrOff, fixup.jmpImmedOff);
}
if (!info.expectedStackDepth) {
// unreachable, or entry block
info.expectedStackDepth = b->catchEntry ? 1 : 0;
}
currentStackDepth = *info.expectedStackDepth;
auto fallthrough = b->fallthrough;
auto end = b->hhbcs.end();
auto flip = false;
if (is_single_nop(*b)) {
if (blockIt == begin(ret.blockOrder)) {
// If the first block is just a Nop, there must be a jump to the
// second block from somewhere else in the function. We don't want
// the Nop optimized away, so change it to an EntryNop, which is
// never removed.
emit_inst(bc_with_loc(b->hhbcs.front().srcLoc, bc::EntryNop {}));
}
} else {
// If the block ends with a JmpZ or JmpNZ targeting the next block
// in emission order, flip the condition so that block becomes the
// fallthrough and jump to the original fallthrough instead.
if (b->hhbcs.back().op == Op::JmpZ ||
b->hhbcs.back().op == Op::JmpNZ) {
auto const& bc = b->hhbcs.back();
auto const target =
bc.op == Op::JmpNZ ? bc.JmpNZ.target1 : bc.JmpZ.target1;
if (std::next(blockIt) != endBlockIt && blockIt[1] == target) {
fallthrough = target;
--end;
flip = true;
}
}
for (auto iit = b->hhbcs.begin(); iit != end; ++iit) emit_inst(*iit);
if (flip) {
if (end->op == Op::JmpNZ) {
emit_inst(bc_with_loc(end->srcLoc, bc::JmpZ { b->fallthrough }));
} else {
emit_inst(bc_with_loc(end->srcLoc, bc::JmpNZ { b->fallthrough }));
}
}
}
info.past = fe.bcPos();
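// If the fallthrough block is not next in emission order, emit an
// explicit Jmp or JmpNS, and compute how many exception regions the
// jump exits.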
if (fallthrough != NoBlockId) {
set_expected_depth(fallthrough);
if (std::next(blockIt) == endBlockIt ||
blockIt[1] != fallthrough) {
if (b->fallthroughNS) {
emit_inst(bc::JmpNS { fallthrough });
} else {
emit_inst(bc::Jmp { fallthrough });
}
auto const nextExnId = func.blocks()[fallthrough]->exnNodeId;
auto const parent = commonParent(*func, nextExnId, b->exnNodeId);
auto depth = [&] (ExnNodeId eid) {
return eid == NoExnNodeId ? 0 : func->exnNodes[eid].depth;
};
// If we are inside an exn region, pop from the current region up to
// the common parent. If the common parent is null, pop all regions.
info.regionsToPop = depth(b->exnNodeId) - depth(parent);
assertx(info.regionsToPop >= 0);
FTRACE(4, " popped catch regions: {}\n", info.regionsToPop);
}
}
if (b->throwExit != NoBlockId) {
FTRACE(4, " throw: {}\n", b->throwExit);
}
if (fallthrough != NoBlockId) {
FTRACE(4, " fallthrough: {}\n", fallthrough);
}
FTRACE(2, " block {} end: {}\n", bid, info.past);
}
if (traceBc) {
FTRACE(0, "TraceBytecode (emit): {}::{} in {}\n",
func->cls ? func->cls->name->data() : "",
func->name, func->unit->filename);
}
return ret;
}