in erts/emulator/asmjit/x86/x86rapass.cpp [129:400]
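// Collects register-allocation constraints for a single instruction: every virtual
// register referenced by an operand (register, memory base/index, or the extra
// REP/{k} register) is added to the RAInstBuilder together with its read/write
// direction, allocable register set, and any fixed physical id the encoding requires.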
Error X86RACFGBuilder::onInst(InstNode* inst, uint32_t& controlType, RAInstBuilder& ib) noexcept {
  InstRWInfo rwInfo;

  uint32_t instId = inst->id();
  if (Inst::isDefinedId(instId)) {
    uint32_t opCount = inst->opCount();
    const Operand* opArray = inst->operands();
    ASMJIT_PROPAGATE(InstInternal::queryRWInfo(_arch, inst->baseInst(), opArray, opCount, &rwInfo));

    const InstDB::InstInfo& instInfo = InstDB::infoById(instId);
    bool hasGpbHiConstraint = false;
    uint32_t singleRegOps = 0;
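
    // Scan the explicit operands and record a tied register for every virtual
    // register they reference.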
    if (opCount) {
      for (uint32_t i = 0; i < opCount; i++) {
        const Operand& op = opArray[i];
        const OpRWInfo& opRwInfo = rwInfo.operand(i);

        if (op.isReg()) {
          // Register Operand
          // ----------------
          const Reg& reg = op.as<Reg>();

          uint32_t flags = raRegRwFlags(opRwInfo.opFlags());
          uint32_t allowedRegs = 0xFFFFFFFFu;

          // X86-specific constraints related to LO|HI general purpose registers.
          // This is only required when the register is part of the encoding. If
          // the register is fixed we won't restrict anything as it doesn't restrict
          // encoding of other registers.
          if (reg.isGpb() && !(opRwInfo.opFlags() & OpRWInfo::kRegPhysId)) {
            flags |= RATiedReg::kX86Gpb;
            if (!_is64Bit) {
              // Restrict to the first four GP registers - AL|AH|BL|BH|CL|CH|DL|DH.
              // In 32-bit mode it's not possible to access SIL|DIL, etc, so this
              // is just enough.
              allowedRegs = 0x0Fu;
            }
            else {
              // If we encountered a GPB-HI register the situation is much more
              // complicated than in 32-bit mode. We need to patch all registers
              // to not use ID higher than 7 and all GPB-LO registers to not use
              // index higher than 3. Instead of doing the patching here we just
              // set a flag and will do it later, to not complicate this loop.
              if (reg.isGpbHi()) {
                hasGpbHiConstraint = true;
                allowedRegs = 0x0Fu;
              }
            }
          }
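
          // Only virtual registers are tracked by the allocator; physical registers
          // referenced directly don't need a tied-register record.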
          uint32_t vIndex = Operand::virtIdToIndex(reg.id());
          if (vIndex < Operand::kVirtIdCount) {
            RAWorkReg* workReg;
            ASMJIT_PROPAGATE(_pass->virtIndexAsWorkReg(vIndex, &workReg));

            // Use RW instead of Write if not the whole register is overwritten.
            // This is important for liveness as we cannot kill a register that
            // will be used. For example `mov al, 0xFF` is not a write-only
            // operation if the user allocated the whole `rax` register.
            if ((flags & RATiedReg::kRW) == RATiedReg::kWrite) {
              if (workReg->regByteMask() & ~(opRwInfo.writeByteMask() | opRwInfo.extendByteMask())) {
                // Not a write-only operation.
                flags = (flags & ~RATiedReg::kOut) | (RATiedReg::kRead | RATiedReg::kUse);
              }
            }

            // Do not use the RegMem flag if changing Reg to Mem requires an
            // additional CPU feature that may not be enabled.
            if (rwInfo.rmFeature() && (flags & (RATiedReg::kUseRM | RATiedReg::kOutRM))) {
              flags &= ~(RATiedReg::kUseRM | RATiedReg::kOutRM);
            }

            uint32_t group = workReg->group();
            uint32_t allocable = _pass->_availableRegs[group] & allowedRegs;

            uint32_t useId = BaseReg::kIdBad;
            uint32_t outId = BaseReg::kIdBad;

            uint32_t useRewriteMask = 0;
            uint32_t outRewriteMask = 0;
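
            // The rewrite mask and any fixed physical id are recorded only for the
            // direction in which the operand is accessed: USE (consumed on input)
            // or OUT (produced on output).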
            if (flags & RATiedReg::kUse) {
              useRewriteMask = Support::bitMask(inst->getRewriteIndex(&reg._baseId));
              if (opRwInfo.opFlags() & OpRWInfo::kRegPhysId) {
                useId = opRwInfo.physId();
                flags |= RATiedReg::kUseFixed;
              }
            }
            else {
              outRewriteMask = Support::bitMask(inst->getRewriteIndex(&reg._baseId));
              if (opRwInfo.opFlags() & OpRWInfo::kRegPhysId) {
                outId = opRwInfo.physId();
                flags |= RATiedReg::kOutFixed;
              }
            }

            ASMJIT_PROPAGATE(ib.add(workReg, flags, allocable, useId, useRewriteMask, outId, outRewriteMask, opRwInfo.rmSize()));
            if (singleRegOps == i)
              singleRegOps++;
          }
        }
        else if (op.isMem()) {
          // Memory Operand
          // --------------
          const Mem& mem = op.as<Mem>();
          ib.addForbiddenFlags(RATiedReg::kUseRM | RATiedReg::kOutRM);

          if (mem.isRegHome()) {
            RAWorkReg* workReg;
            ASMJIT_PROPAGATE(_pass->virtIndexAsWorkReg(Operand::virtIdToIndex(mem.baseId()), &workReg));
            _pass->getOrCreateStackSlot(workReg);
          }
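          // A virtual base register is tied so the allocator assigns it a physical
          // register before the effective address can be encoded.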
          else if (mem.hasBaseReg()) {
            uint32_t vIndex = Operand::virtIdToIndex(mem.baseId());
            if (vIndex < Operand::kVirtIdCount) {
              RAWorkReg* workReg;
              ASMJIT_PROPAGATE(_pass->virtIndexAsWorkReg(vIndex, &workReg));

              uint32_t flags = raMemBaseRwFlags(opRwInfo.opFlags());
              uint32_t group = workReg->group();
              uint32_t allocable = _pass->_availableRegs[group];

              uint32_t useId = BaseReg::kIdBad;
              uint32_t outId = BaseReg::kIdBad;

              uint32_t useRewriteMask = 0;
              uint32_t outRewriteMask = 0;

              if (flags & RATiedReg::kUse) {
                useRewriteMask = Support::bitMask(inst->getRewriteIndex(&mem._baseId));
                if (opRwInfo.opFlags() & OpRWInfo::kMemPhysId) {
                  useId = opRwInfo.physId();
                  flags |= RATiedReg::kUseFixed;
                }
              }
              else {
                outRewriteMask = Support::bitMask(inst->getRewriteIndex(&mem._baseId));
                if (opRwInfo.opFlags() & OpRWInfo::kMemPhysId) {
                  outId = opRwInfo.physId();
                  flags |= RATiedReg::kOutFixed;
                }
              }

              ASMJIT_PROPAGATE(ib.add(workReg, flags, allocable, useId, useRewriteMask, outId, outRewriteMask));
            }
          }
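
          // The index register is always tied as USE|READ; the RW info only selects
          // which rewrite mask receives the operand's rewrite index.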
          if (mem.hasIndexReg()) {
            uint32_t vIndex = Operand::virtIdToIndex(mem.indexId());
            if (vIndex < Operand::kVirtIdCount) {
              RAWorkReg* workReg;
              ASMJIT_PROPAGATE(_pass->virtIndexAsWorkReg(vIndex, &workReg));

              uint32_t flags = raMemIndexRwFlags(opRwInfo.opFlags());
              uint32_t group = workReg->group();
              uint32_t allocable = _pass->_availableRegs[group];

              // Index registers never have a fixed id on X86/x64.
              const uint32_t useId = BaseReg::kIdBad;
              const uint32_t outId = BaseReg::kIdBad;

              uint32_t useRewriteMask = 0;
              uint32_t outRewriteMask = 0;

              if (flags & RATiedReg::kUse)
                useRewriteMask = Support::bitMask(inst->getRewriteIndex(&mem._data[Operand::kDataMemIndexId]));
              else
                outRewriteMask = Support::bitMask(inst->getRewriteIndex(&mem._data[Operand::kDataMemIndexId]));

              ASMJIT_PROPAGATE(ib.add(workReg, RATiedReg::kUse | RATiedReg::kRead, allocable, useId, useRewriteMask, outId, outRewriteMask));
            }
          }
        }
      }
    }

    // Handle extra operand (either REP {cx|ecx|rcx} or AVX-512 {k} selector).
    if (inst->hasExtraReg()) {
      uint32_t vIndex = Operand::virtIdToIndex(inst->extraReg().id());
      if (vIndex < Operand::kVirtIdCount) {
        RAWorkReg* workReg;
        ASMJIT_PROPAGATE(_pass->virtIndexAsWorkReg(vIndex, &workReg));

        uint32_t group = workReg->group();
        uint32_t rewriteMask = Support::bitMask(inst->getRewriteIndex(&inst->extraReg()._id));

        if (group == Gp::kGroupKReg) {
          // AVX-512 mask selector {k} register - read-only, allocable to any register except {k0}.
          uint32_t allocableRegs = _pass->_availableRegs[group] & ~Support::bitMask(0);
          ASMJIT_PROPAGATE(ib.add(workReg, RATiedReg::kUse | RATiedReg::kRead, allocableRegs, BaseReg::kIdBad, rewriteMask, BaseReg::kIdBad, 0));
          singleRegOps = 0;
        }
        else {
          // REP {cx|ecx|rcx} register - read & write, allocable to {cx|ecx|rcx} only.
          ASMJIT_PROPAGATE(ib.add(workReg, RATiedReg::kUse | RATiedReg::kRW, 0, Gp::kIdCx, rewriteMask, Gp::kIdBad, 0));
        }
      }
      else {
        uint32_t group = inst->extraReg().group();
        if (group == Gp::kGroupKReg && inst->extraReg().id() != 0)
          singleRegOps = 0;
      }
    }

    // Handle X86 constraints.
    if (hasGpbHiConstraint) {
      for (RATiedReg& tiedReg : ib) {
        tiedReg._allocableRegs &= tiedReg.hasFlag(RATiedReg::kX86Gpb) ? 0x0Fu : 0xFFu;
      }
    }
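
    // A single tied register occurs when all operands share one virtual register
    // (e.g. `xor v0, v0`) or when the only other operand is an immediate
    // (e.g. `or v0, -1`, `add v0, 0`).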
    if (ib.tiedRegCount() == 1) {
      // Handle special cases of some instructions where all operands share the same
      // register. In such a case the single operand becomes read-only or write-only.
      uint32_t singleRegCase = InstDB::kSingleRegNone;
      if (singleRegOps == opCount) {
        singleRegCase = instInfo.singleRegCase();
      }
      else if (opCount == 2 && inst->op(1).isImm()) {
        // Handle some tricks used by X86 asm.
        const BaseReg& reg = inst->op(0).as<BaseReg>();
        const Imm& imm = inst->op(1).as<Imm>();
        const RAWorkReg* workReg = _pass->workRegById(ib[0]->workId());
        uint32_t workRegSize = workReg->info().size();

        switch (inst->id()) {
          case Inst::kIdOr: {
            // Sets the value of the destination register to -1, previous content unused.
            if (reg.size() >= 4 || reg.size() >= workRegSize) {
              if (imm.value() == -1 || imm.valueAs<uint64_t>() == raImmMaskFromSize(reg.size()))
                singleRegCase = InstDB::kSingleRegWO;
            }
            ASMJIT_FALLTHROUGH;
          }

          case Inst::kIdAdd:
          case Inst::kIdAnd:
          case Inst::kIdRol:
          case Inst::kIdRor:
          case Inst::kIdSar:
          case Inst::kIdShl:
          case Inst::kIdShr:
          case Inst::kIdSub:
          case Inst::kIdXor: {
            // Updates [E|R]FLAGS without changing the content.
            if (reg.size() != 4 || reg.size() >= workRegSize) {
              if (imm.value() == 0)
                singleRegCase = InstDB::kSingleRegRO;
            }
            break;
          }
        }
      }

      switch (singleRegCase) {
        case InstDB::kSingleRegNone:
          break;
        case InstDB::kSingleRegRO:
          ib[0]->makeReadOnly();
          break;
        case InstDB::kSingleRegWO:
          ib[0]->makeWriteOnly();
          break;
      }
    }
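
    // Propagate the control type (jump, call, return, ...) so the CFG builder knows
    // whether this instruction terminates the current basic block.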
    controlType = instInfo.controlType();
  }

  return kErrorOk;
}