in llvm/lib/CodeGen/MachineVerifier.cpp [905:1634]
void MachineVerifier::verifyPreISelGenericInstruction(const MachineInstr *MI) {
if (isFunctionSelected)
report("Unexpected generic instruction in a Selected function", MI);
const MCInstrDesc &MCID = MI->getDesc();
unsigned NumOps = MI->getNumOperands();
// Branches must reference a basic block if they are not indirect
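  // Illustrative MIR (hypothetical block/register names):
  //   G_BR %bb.1         ; direct branch: must carry an MBB operand
  //   G_BRINDIRECT %ptr  ; exempt, since it is an indirect branch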
if (MI->isBranch() && !MI->isIndirectBranch()) {
bool HasMBB = false;
for (const MachineOperand &Op : MI->operands()) {
if (Op.isMBB()) {
HasMBB = true;
break;
}
}
if (!HasMBB) {
report("Branch instruction is missing a basic block operand or "
"isIndirectBranch property",
MI);
}
}
// Check types.
SmallVector<LLT, 4> Types;
for (unsigned I = 0, E = std::min(MCID.getNumOperands(), NumOps);
I != E; ++I) {
if (!MCID.OpInfo[I].isGenericType())
continue;
// Generic instructions specify type equality constraints between some of
// their operands. Make sure these are consistent.
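    // For example, in "%sum:_(s32) = G_ADD %a:_(s32), %b:_(s32)" all three
    // operands share generic type index 0, so their LLTs must agree.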
size_t TypeIdx = MCID.OpInfo[I].getGenericTypeIndex();
Types.resize(std::max(TypeIdx + 1, Types.size()));
const MachineOperand *MO = &MI->getOperand(I);
if (!MO->isReg()) {
report("generic instruction must use register operands", MI);
continue;
}
LLT OpTy = MRI->getType(MO->getReg());
    // Only check for a mismatch when both types are present; a missing type
    // is reported separately below to reduce noise:
if (OpTy.isValid()) {
// Only the first valid type for a type index will be printed: don't
// overwrite it later so it's always clear which type was expected:
if (!Types[TypeIdx].isValid())
Types[TypeIdx] = OpTy;
else if (Types[TypeIdx] != OpTy)
report("Type mismatch in generic instruction", MO, I, OpTy);
} else {
// Generic instructions must have types attached to their operands.
report("Generic instruction is missing a virtual register type", MO, I);
}
}
// Generic opcodes must not have physical register operands.
for (unsigned I = 0; I < MI->getNumOperands(); ++I) {
const MachineOperand *MO = &MI->getOperand(I);
if (MO->isReg() && Register::isPhysicalRegister(MO->getReg()))
report("Generic instruction cannot have physical register", MO, I);
}
  // Avoid out-of-bounds accesses in the checks below; a missing-operand error
  // was already reported earlier.
if (MI->getNumOperands() < MCID.getNumOperands())
return;
StringRef ErrorInfo;
if (!TII->verifyInstruction(*MI, ErrorInfo))
report(ErrorInfo.data(), MI);
// Verify properties of various specific instruction types
unsigned Opc = MI->getOpcode();
switch (Opc) {
case TargetOpcode::G_ASSERT_SEXT:
case TargetOpcode::G_ASSERT_ZEXT: {
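    // Expected form (illustrative MIR, hypothetical registers):
    //   %dst:_(s64) = G_ASSERT_ZEXT %src:_(s64), 32
    // The immediate is the bit width of the value known to have been
    // extended; the checks below enforce 1 <= width < source bit width.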
std::string OpcName =
Opc == TargetOpcode::G_ASSERT_ZEXT ? "G_ASSERT_ZEXT" : "G_ASSERT_SEXT";
if (!MI->getOperand(2).isImm()) {
report(Twine(OpcName, " expects an immediate operand #2"), MI);
break;
}
Register Dst = MI->getOperand(0).getReg();
Register Src = MI->getOperand(1).getReg();
LLT SrcTy = MRI->getType(Src);
int64_t Imm = MI->getOperand(2).getImm();
if (Imm <= 0) {
report(Twine(OpcName, " size must be >= 1"), MI);
break;
}
if (Imm >= SrcTy.getScalarSizeInBits()) {
report(Twine(OpcName, " size must be less than source bit width"), MI);
break;
}
if (MRI->getRegBankOrNull(Src) != MRI->getRegBankOrNull(Dst)) {
report(
Twine(OpcName, " source and destination register banks must match"),
MI);
break;
}
if (MRI->getRegClassOrNull(Src) != MRI->getRegClassOrNull(Dst))
report(
Twine(OpcName, " source and destination register classes must match"),
MI);
break;
}
case TargetOpcode::G_CONSTANT:
case TargetOpcode::G_FCONSTANT: {
LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
if (DstTy.isVector())
report("Instruction cannot use a vector result type", MI);
if (MI->getOpcode() == TargetOpcode::G_CONSTANT) {
if (!MI->getOperand(1).isCImm()) {
report("G_CONSTANT operand must be cimm", MI);
break;
}
const ConstantInt *CI = MI->getOperand(1).getCImm();
if (CI->getBitWidth() != DstTy.getSizeInBits())
report("inconsistent constant size", MI);
} else {
if (!MI->getOperand(1).isFPImm()) {
report("G_FCONSTANT operand must be fpimm", MI);
break;
}
const ConstantFP *CF = MI->getOperand(1).getFPImm();
if (APFloat::getSizeInBits(CF->getValueAPF().getSemantics()) !=
DstTy.getSizeInBits()) {
report("inconsistent constant size", MI);
}
}
break;
}
case TargetOpcode::G_LOAD:
case TargetOpcode::G_STORE:
case TargetOpcode::G_ZEXTLOAD:
case TargetOpcode::G_SEXTLOAD: {
LLT ValTy = MRI->getType(MI->getOperand(0).getReg());
LLT PtrTy = MRI->getType(MI->getOperand(1).getReg());
if (!PtrTy.isPointer())
report("Generic memory instruction must access a pointer", MI);
// Generic loads and stores must have a single MachineMemOperand
// describing that access.
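    // Illustrative MIR (hypothetical registers):
    //   %v:_(s32) = G_ZEXTLOAD %p(p0) :: (load (s16))  ; narrower memory: OK
    //   %v:_(s32) = G_LOAD %p(p0) :: (load (s64))      ; too wide: error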
if (!MI->hasOneMemOperand()) {
report("Generic instruction accessing memory must have one mem operand",
MI);
} else {
const MachineMemOperand &MMO = **MI->memoperands_begin();
if (MI->getOpcode() == TargetOpcode::G_ZEXTLOAD ||
MI->getOpcode() == TargetOpcode::G_SEXTLOAD) {
if (MMO.getSizeInBits() >= ValTy.getSizeInBits())
report("Generic extload must have a narrower memory type", MI);
} else if (MI->getOpcode() == TargetOpcode::G_LOAD) {
if (MMO.getSize() > ValTy.getSizeInBytes())
report("load memory size cannot exceed result size", MI);
} else if (MI->getOpcode() == TargetOpcode::G_STORE) {
if (ValTy.getSizeInBytes() < MMO.getSize())
report("store memory size cannot exceed value size", MI);
}
}
break;
}
case TargetOpcode::G_PHI: {
LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
if (!DstTy.isValid() || !all_of(drop_begin(MI->operands()),
[this, &DstTy](const MachineOperand &MO) {
if (!MO.isReg())
return true;
LLT Ty = MRI->getType(MO.getReg());
if (!Ty.isValid() || (Ty != DstTy))
return false;
return true;
}))
report("Generic Instruction G_PHI has operands with incompatible/missing "
"types",
MI);
break;
}
case TargetOpcode::G_BITCAST: {
LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
LLT SrcTy = MRI->getType(MI->getOperand(1).getReg());
if (!DstTy.isValid() || !SrcTy.isValid())
break;
if (SrcTy.isPointer() != DstTy.isPointer())
report("bitcast cannot convert between pointers and other types", MI);
if (SrcTy.getSizeInBits() != DstTy.getSizeInBits())
report("bitcast sizes must match", MI);
if (SrcTy == DstTy)
report("bitcast must change the type", MI);
break;
}
case TargetOpcode::G_INTTOPTR:
case TargetOpcode::G_PTRTOINT:
case TargetOpcode::G_ADDRSPACE_CAST: {
LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
LLT SrcTy = MRI->getType(MI->getOperand(1).getReg());
if (!DstTy.isValid() || !SrcTy.isValid())
break;
verifyVectorElementMatch(DstTy, SrcTy, MI);
DstTy = DstTy.getScalarType();
SrcTy = SrcTy.getScalarType();
if (MI->getOpcode() == TargetOpcode::G_INTTOPTR) {
if (!DstTy.isPointer())
report("inttoptr result type must be a pointer", MI);
if (SrcTy.isPointer())
report("inttoptr source type must not be a pointer", MI);
} else if (MI->getOpcode() == TargetOpcode::G_PTRTOINT) {
if (!SrcTy.isPointer())
report("ptrtoint source type must be a pointer", MI);
if (DstTy.isPointer())
report("ptrtoint result type must not be a pointer", MI);
} else {
assert(MI->getOpcode() == TargetOpcode::G_ADDRSPACE_CAST);
if (!SrcTy.isPointer() || !DstTy.isPointer())
report("addrspacecast types must be pointers", MI);
else {
if (SrcTy.getAddressSpace() == DstTy.getAddressSpace())
report("addrspacecast must convert different address spaces", MI);
}
}
break;
}
case TargetOpcode::G_PTR_ADD: {
LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
LLT PtrTy = MRI->getType(MI->getOperand(1).getReg());
LLT OffsetTy = MRI->getType(MI->getOperand(2).getReg());
if (!DstTy.isValid() || !PtrTy.isValid() || !OffsetTy.isValid())
break;
if (!PtrTy.getScalarType().isPointer())
report("gep first operand must be a pointer", MI);
if (OffsetTy.getScalarType().isPointer())
report("gep offset operand must not be a pointer", MI);
// TODO: Is the offset allowed to be a scalar with a vector?
break;
}
case TargetOpcode::G_PTRMASK: {
LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
LLT SrcTy = MRI->getType(MI->getOperand(1).getReg());
LLT MaskTy = MRI->getType(MI->getOperand(2).getReg());
if (!DstTy.isValid() || !SrcTy.isValid() || !MaskTy.isValid())
break;
if (!DstTy.getScalarType().isPointer())
report("ptrmask result type must be a pointer", MI);
if (!MaskTy.getScalarType().isScalar())
report("ptrmask mask type must be an integer", MI);
verifyVectorElementMatch(DstTy, MaskTy, MI);
break;
}
case TargetOpcode::G_SEXT:
case TargetOpcode::G_ZEXT:
case TargetOpcode::G_ANYEXT:
case TargetOpcode::G_TRUNC:
case TargetOpcode::G_FPEXT:
case TargetOpcode::G_FPTRUNC: {
    // The number of operands and presence of types were already checked (and
    // reported in case of any issues), so there is no need to report them
    // again. As we're trying to report as many issues as possible at once,
    // however, the instructions aren't guaranteed to have the right number of
    // operands or types attached to them at this point.
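    // e.g. "%d:_(s64) = G_SEXT %s:_(s32)" widens and "%t:_(s16) = G_TRUNC %s"
    // narrows; same-size or inverted pairs are rejected below.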
assert(MCID.getNumOperands() == 2 && "Expected 2 operands G_*{EXT,TRUNC}");
LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
LLT SrcTy = MRI->getType(MI->getOperand(1).getReg());
if (!DstTy.isValid() || !SrcTy.isValid())
break;
LLT DstElTy = DstTy.getScalarType();
LLT SrcElTy = SrcTy.getScalarType();
if (DstElTy.isPointer() || SrcElTy.isPointer())
report("Generic extend/truncate can not operate on pointers", MI);
verifyVectorElementMatch(DstTy, SrcTy, MI);
unsigned DstSize = DstElTy.getSizeInBits();
unsigned SrcSize = SrcElTy.getSizeInBits();
switch (MI->getOpcode()) {
default:
if (DstSize <= SrcSize)
report("Generic extend has destination type no larger than source", MI);
break;
case TargetOpcode::G_TRUNC:
case TargetOpcode::G_FPTRUNC:
if (DstSize >= SrcSize)
report("Generic truncate has destination type no smaller than source",
MI);
break;
}
break;
}
case TargetOpcode::G_SELECT: {
LLT SelTy = MRI->getType(MI->getOperand(0).getReg());
LLT CondTy = MRI->getType(MI->getOperand(1).getReg());
if (!SelTy.isValid() || !CondTy.isValid())
break;
// Scalar condition select on a vector is valid.
if (CondTy.isVector())
verifyVectorElementMatch(SelTy, CondTy, MI);
break;
}
case TargetOpcode::G_MERGE_VALUES: {
// G_MERGE_VALUES should only be used to merge scalars into a larger scalar,
// e.g. s2N = MERGE sN, sN
    // Merging multiple scalars into a vector is not allowed; use
    // G_BUILD_VECTOR for that.
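    // Illustrative MIR (hypothetical registers):
    //   %x:_(s64) = G_MERGE_VALUES %lo:_(s32), %hi:_(s32)  ; OK
    //   %v:_(<2 x s32>) = G_MERGE_VALUES %lo, %hi          ; rejected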
LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
LLT SrcTy = MRI->getType(MI->getOperand(1).getReg());
if (DstTy.isVector() || SrcTy.isVector())
report("G_MERGE_VALUES cannot operate on vectors", MI);
const unsigned NumOps = MI->getNumOperands();
if (DstTy.getSizeInBits() != SrcTy.getSizeInBits() * (NumOps - 1))
report("G_MERGE_VALUES result size is inconsistent", MI);
for (unsigned I = 2; I != NumOps; ++I) {
if (MRI->getType(MI->getOperand(I).getReg()) != SrcTy)
report("G_MERGE_VALUES source types do not match", MI);
}
break;
}
case TargetOpcode::G_UNMERGE_VALUES: {
LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
LLT SrcTy = MRI->getType(MI->getOperand(MI->getNumOperands()-1).getReg());
// For now G_UNMERGE can split vectors.
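    // Both of these are accepted (hypothetical registers):
    //   %a:_(s32), %b:_(s32) = G_UNMERGE_VALUES %x:_(s64)
    //   %l:_(<2 x s32>), %h:_(<2 x s32>) = G_UNMERGE_VALUES %v:_(<4 x s32>)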
for (unsigned i = 0; i < MI->getNumOperands()-1; ++i) {
if (MRI->getType(MI->getOperand(i).getReg()) != DstTy)
report("G_UNMERGE_VALUES destination types do not match", MI);
}
if (SrcTy.getSizeInBits() !=
(DstTy.getSizeInBits() * (MI->getNumOperands() - 1))) {
report("G_UNMERGE_VALUES source operand does not cover dest operands",
MI);
}
break;
}
case TargetOpcode::G_BUILD_VECTOR: {
// Source types must be scalars, dest type a vector. Total size of scalars
// must match the dest vector size.
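    // e.g. (hypothetical registers, all four sources s32):
    //   %v:_(<4 x s32>) = G_BUILD_VECTOR %a:_(s32), %b, %c, %d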
LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
LLT SrcEltTy = MRI->getType(MI->getOperand(1).getReg());
if (!DstTy.isVector() || SrcEltTy.isVector()) {
report("G_BUILD_VECTOR must produce a vector from scalar operands", MI);
break;
}
if (DstTy.getElementType() != SrcEltTy)
report("G_BUILD_VECTOR result element type must match source type", MI);
if (DstTy.getNumElements() != MI->getNumOperands() - 1)
report("G_BUILD_VECTOR must have an operand for each elemement", MI);
for (const MachineOperand &MO : llvm::drop_begin(MI->operands(), 2))
if (MRI->getType(MI->getOperand(1).getReg()) != MRI->getType(MO.getReg()))
report("G_BUILD_VECTOR source operand types are not homogeneous", MI);
break;
}
case TargetOpcode::G_BUILD_VECTOR_TRUNC: {
// Source types must be scalars, dest type a vector. Scalar types must be
// larger than the dest vector elt type, as this is a truncating operation.
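    // e.g. (hypothetical registers; each s32 source is truncated to s16):
    //   %v:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC %a:_(s32), %b:_(s32)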
LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
LLT SrcEltTy = MRI->getType(MI->getOperand(1).getReg());
if (!DstTy.isVector() || SrcEltTy.isVector())
report("G_BUILD_VECTOR_TRUNC must produce a vector from scalar operands",
MI);
for (const MachineOperand &MO : llvm::drop_begin(MI->operands(), 2))
if (MRI->getType(MI->getOperand(1).getReg()) != MRI->getType(MO.getReg()))
report("G_BUILD_VECTOR_TRUNC source operand types are not homogeneous",
MI);
if (SrcEltTy.getSizeInBits() <= DstTy.getElementType().getSizeInBits())
report("G_BUILD_VECTOR_TRUNC source operand types are not larger than "
"dest elt type",
MI);
break;
}
case TargetOpcode::G_CONCAT_VECTORS: {
// Source types should be vectors, and total size should match the dest
// vector size.
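    // e.g. (hypothetical registers):
    //   %v:_(<4 x s32>) = G_CONCAT_VECTORS %a:_(<2 x s32>), %b:_(<2 x s32>)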
LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
LLT SrcTy = MRI->getType(MI->getOperand(1).getReg());
if (!DstTy.isVector() || !SrcTy.isVector())
report("G_CONCAT_VECTOR requires vector source and destination operands",
MI);
if (MI->getNumOperands() < 3)
report("G_CONCAT_VECTOR requires at least 2 source operands", MI);
for (const MachineOperand &MO : llvm::drop_begin(MI->operands(), 2))
if (MRI->getType(MI->getOperand(1).getReg()) != MRI->getType(MO.getReg()))
report("G_CONCAT_VECTOR source operand types are not homogeneous", MI);
if (DstTy.getNumElements() !=
SrcTy.getNumElements() * (MI->getNumOperands() - 1))
report("G_CONCAT_VECTOR num dest and source elements should match", MI);
break;
}
case TargetOpcode::G_ICMP:
case TargetOpcode::G_FCMP: {
LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
LLT SrcTy = MRI->getType(MI->getOperand(2).getReg());
if ((DstTy.isVector() != SrcTy.isVector()) ||
(DstTy.isVector() && DstTy.getNumElements() != SrcTy.getNumElements()))
report("Generic vector icmp/fcmp must preserve number of lanes", MI);
break;
}
case TargetOpcode::G_EXTRACT: {
const MachineOperand &SrcOp = MI->getOperand(1);
if (!SrcOp.isReg()) {
report("extract source must be a register", MI);
break;
}
const MachineOperand &OffsetOp = MI->getOperand(2);
if (!OffsetOp.isImm()) {
report("extract offset must be a constant", MI);
break;
}
unsigned DstSize = MRI->getType(MI->getOperand(0).getReg()).getSizeInBits();
unsigned SrcSize = MRI->getType(SrcOp.getReg()).getSizeInBits();
if (SrcSize == DstSize)
report("extract source must be larger than result", MI);
if (DstSize + OffsetOp.getImm() > SrcSize)
report("extract reads past end of register", MI);
break;
}
case TargetOpcode::G_INSERT: {
const MachineOperand &SrcOp = MI->getOperand(2);
if (!SrcOp.isReg()) {
report("insert source must be a register", MI);
break;
}
const MachineOperand &OffsetOp = MI->getOperand(3);
if (!OffsetOp.isImm()) {
report("insert offset must be a constant", MI);
break;
}
unsigned DstSize = MRI->getType(MI->getOperand(0).getReg()).getSizeInBits();
unsigned SrcSize = MRI->getType(SrcOp.getReg()).getSizeInBits();
if (DstSize <= SrcSize)
report("inserted size must be smaller than total register", MI);
if (SrcSize + OffsetOp.getImm() > DstSize)
report("insert writes past end of register", MI);
break;
}
case TargetOpcode::G_JUMP_TABLE: {
if (!MI->getOperand(1).isJTI())
report("G_JUMP_TABLE source operand must be a jump table index", MI);
LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
if (!DstTy.isPointer())
report("G_JUMP_TABLE dest operand must have a pointer type", MI);
break;
}
case TargetOpcode::G_BRJT: {
if (!MRI->getType(MI->getOperand(0).getReg()).isPointer())
report("G_BRJT src operand 0 must be a pointer type", MI);
if (!MI->getOperand(1).isJTI())
report("G_BRJT src operand 1 must be a jump table index", MI);
const auto &IdxOp = MI->getOperand(2);
if (!IdxOp.isReg() || MRI->getType(IdxOp.getReg()).isPointer())
report("G_BRJT src operand 2 must be a scalar reg type", MI);
break;
}
case TargetOpcode::G_INTRINSIC:
case TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS: {
// TODO: Should verify number of def and use operands, but the current
// interface requires passing in IR types for mangling.
const MachineOperand &IntrIDOp = MI->getOperand(MI->getNumExplicitDefs());
if (!IntrIDOp.isIntrinsicID()) {
report("G_INTRINSIC first src operand must be an intrinsic ID", MI);
break;
}
bool NoSideEffects = MI->getOpcode() == TargetOpcode::G_INTRINSIC;
unsigned IntrID = IntrIDOp.getIntrinsicID();
if (IntrID != 0 && IntrID < Intrinsic::num_intrinsics) {
AttributeList Attrs
= Intrinsic::getAttributes(MF->getFunction().getContext(),
static_cast<Intrinsic::ID>(IntrID));
bool DeclHasSideEffects = !Attrs.hasFnAttr(Attribute::ReadNone);
if (NoSideEffects && DeclHasSideEffects) {
report("G_INTRINSIC used with intrinsic that accesses memory", MI);
break;
}
if (!NoSideEffects && !DeclHasSideEffects) {
report("G_INTRINSIC_W_SIDE_EFFECTS used with readnone intrinsic", MI);
break;
}
}
break;
}
case TargetOpcode::G_SEXT_INREG: {
if (!MI->getOperand(2).isImm()) {
report("G_SEXT_INREG expects an immediate operand #2", MI);
break;
}
LLT SrcTy = MRI->getType(MI->getOperand(1).getReg());
int64_t Imm = MI->getOperand(2).getImm();
if (Imm <= 0)
report("G_SEXT_INREG size must be >= 1", MI);
if (Imm >= SrcTy.getScalarSizeInBits())
report("G_SEXT_INREG size must be less than source bit width", MI);
break;
}
case TargetOpcode::G_SHUFFLE_VECTOR: {
const MachineOperand &MaskOp = MI->getOperand(3);
if (!MaskOp.isShuffleMask()) {
report("Incorrect mask operand type for G_SHUFFLE_VECTOR", MI);
break;
}
LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
LLT Src0Ty = MRI->getType(MI->getOperand(1).getReg());
LLT Src1Ty = MRI->getType(MI->getOperand(2).getReg());
if (Src0Ty != Src1Ty)
report("Source operands must be the same type", MI);
if (Src0Ty.getScalarType() != DstTy.getScalarType())
report("G_SHUFFLE_VECTOR cannot change element type", MI);
    // Don't check that all operands are vectors because scalars are used in
    // place of 1-element vectors.
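    // e.g. with two <2 x s32> sources, a mask index of 3 selects element 1 of
    // the second source; indices must stay below 2 * SrcNumElts, and negative
    // indices denote undef lanes.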
int SrcNumElts = Src0Ty.isVector() ? Src0Ty.getNumElements() : 1;
int DstNumElts = DstTy.isVector() ? DstTy.getNumElements() : 1;
ArrayRef<int> MaskIdxes = MaskOp.getShuffleMask();
if (static_cast<int>(MaskIdxes.size()) != DstNumElts)
report("Wrong result type for shufflemask", MI);
for (int Idx : MaskIdxes) {
if (Idx < 0)
continue;
if (Idx >= 2 * SrcNumElts)
report("Out of bounds shuffle index", MI);
}
break;
}
case TargetOpcode::G_DYN_STACKALLOC: {
const MachineOperand &DstOp = MI->getOperand(0);
const MachineOperand &AllocOp = MI->getOperand(1);
const MachineOperand &AlignOp = MI->getOperand(2);
if (!DstOp.isReg() || !MRI->getType(DstOp.getReg()).isPointer()) {
report("dst operand 0 must be a pointer type", MI);
break;
}
if (!AllocOp.isReg() || !MRI->getType(AllocOp.getReg()).isScalar()) {
report("src operand 1 must be a scalar reg type", MI);
break;
}
if (!AlignOp.isImm()) {
report("src operand 2 must be an immediate type", MI);
break;
}
break;
}
case TargetOpcode::G_MEMCPY_INLINE:
case TargetOpcode::G_MEMCPY:
case TargetOpcode::G_MEMMOVE: {
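    // Operand layout assumed by the checks below: operand 0 is the destination
    // pointer, operand 1 the source pointer, operand 2 the size, and (except
    // for G_MEMCPY_INLINE) operand 3 the 'tail' flag immediate. The two MMOs
    // describe the store and the load, in that order.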
ArrayRef<MachineMemOperand *> MMOs = MI->memoperands();
if (MMOs.size() != 2) {
report("memcpy/memmove must have 2 memory operands", MI);
break;
}
if ((!MMOs[0]->isStore() || MMOs[0]->isLoad()) ||
(MMOs[1]->isStore() || !MMOs[1]->isLoad())) {
report("wrong memory operand types", MI);
break;
}
if (MMOs[0]->getSize() != MMOs[1]->getSize())
report("inconsistent memory operand sizes", MI);
LLT DstPtrTy = MRI->getType(MI->getOperand(0).getReg());
LLT SrcPtrTy = MRI->getType(MI->getOperand(1).getReg());
if (!DstPtrTy.isPointer() || !SrcPtrTy.isPointer()) {
report("memory instruction operand must be a pointer", MI);
break;
}
if (DstPtrTy.getAddressSpace() != MMOs[0]->getAddrSpace())
report("inconsistent store address space", MI);
if (SrcPtrTy.getAddressSpace() != MMOs[1]->getAddrSpace())
report("inconsistent load address space", MI);
if (Opc != TargetOpcode::G_MEMCPY_INLINE)
if (!MI->getOperand(3).isImm() || (MI->getOperand(3).getImm() & ~1LL))
report("'tail' flag (operand 3) must be an immediate 0 or 1", MI);
break;
}
case TargetOpcode::G_BZERO:
case TargetOpcode::G_MEMSET: {
ArrayRef<MachineMemOperand *> MMOs = MI->memoperands();
std::string Name = Opc == TargetOpcode::G_MEMSET ? "memset" : "bzero";
if (MMOs.size() != 1) {
report(Twine(Name, " must have 1 memory operand"), MI);
break;
}
if ((!MMOs[0]->isStore() || MMOs[0]->isLoad())) {
report(Twine(Name, " memory operand must be a store"), MI);
break;
}
LLT DstPtrTy = MRI->getType(MI->getOperand(0).getReg());
if (!DstPtrTy.isPointer()) {
report(Twine(Name, " operand must be a pointer"), MI);
break;
}
if (DstPtrTy.getAddressSpace() != MMOs[0]->getAddrSpace())
report("inconsistent " + Twine(Name, " address space"), MI);
if (!MI->getOperand(MI->getNumOperands() - 1).isImm() ||
(MI->getOperand(MI->getNumOperands() - 1).getImm() & ~1LL))
report("'tail' flag (last operand) must be an immediate 0 or 1", MI);
break;
}
case TargetOpcode::G_VECREDUCE_SEQ_FADD:
case TargetOpcode::G_VECREDUCE_SEQ_FMUL: {
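    // e.g. "%r:_(s32) = G_VECREDUCE_SEQ_FADD %acc:_(s32), %v:_(<4 x s32>)"
    // (hypothetical registers): scalar accumulator first, vector second.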
LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
LLT Src1Ty = MRI->getType(MI->getOperand(1).getReg());
LLT Src2Ty = MRI->getType(MI->getOperand(2).getReg());
if (!DstTy.isScalar())
report("Vector reduction requires a scalar destination type", MI);
    if (!Src1Ty.isScalar())
      report("Sequential FADD/FMUL vector reduction requires a scalar 1st "
             "operand", MI);
    if (!Src2Ty.isVector())
      report("Sequential FADD/FMUL vector reduction must have a vector 2nd "
             "operand", MI);
break;
}
case TargetOpcode::G_VECREDUCE_FADD:
case TargetOpcode::G_VECREDUCE_FMUL:
case TargetOpcode::G_VECREDUCE_FMAX:
case TargetOpcode::G_VECREDUCE_FMIN:
case TargetOpcode::G_VECREDUCE_ADD:
case TargetOpcode::G_VECREDUCE_MUL:
case TargetOpcode::G_VECREDUCE_AND:
case TargetOpcode::G_VECREDUCE_OR:
case TargetOpcode::G_VECREDUCE_XOR:
case TargetOpcode::G_VECREDUCE_SMAX:
case TargetOpcode::G_VECREDUCE_SMIN:
case TargetOpcode::G_VECREDUCE_UMAX:
case TargetOpcode::G_VECREDUCE_UMIN: {
LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
if (!DstTy.isScalar())
report("Vector reduction requires a scalar destination type", MI);
break;
}
case TargetOpcode::G_SBFX:
case TargetOpcode::G_UBFX: {
    LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
    if (DstTy.isVector())
      report("Bitfield extraction is not supported on vectors", MI);
    break;
}
case TargetOpcode::G_SHL:
case TargetOpcode::G_LSHR:
case TargetOpcode::G_ASHR:
case TargetOpcode::G_ROTR:
case TargetOpcode::G_ROTL: {
    LLT Src1Ty = MRI->getType(MI->getOperand(1).getReg());
    LLT Src2Ty = MRI->getType(MI->getOperand(2).getReg());
    if (Src1Ty.isVector() != Src2Ty.isVector())
      report("Shifts and rotates require operands to be either all scalars or "
             "all vectors",
             MI);
    break;
}
case TargetOpcode::G_LLROUND:
case TargetOpcode::G_LROUND: {
verifyAllRegOpsScalar(*MI, *MRI);
break;
}
default:
break;
}
}