in llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp [499:1573]
void RISCVDAGToDAGISel::Select(SDNode *Node) {
// If we have a custom node, we have already selected.
if (Node->isMachineOpcode()) {
LLVM_DEBUG(dbgs() << "== "; Node->dump(CurDAG); dbgs() << "\n");
Node->setNodeId(-1);
return;
}
// Instruction selection not handled by the auto-generated tablegen patterns
// should be handled here.
unsigned Opcode = Node->getOpcode();
MVT XLenVT = Subtarget->getXLenVT();
SDLoc DL(Node);
MVT VT = Node->getSimpleValueType(0);
switch (Opcode) {
case ISD::Constant: {
auto *ConstNode = cast<ConstantSDNode>(Node);
if (VT == XLenVT && ConstNode->isZero()) {
SDValue New =
CurDAG->getCopyFromReg(CurDAG->getEntryNode(), DL, RISCV::X0, XLenVT);
ReplaceNode(Node, New.getNode());
return;
}
int64_t Imm = ConstNode->getSExtValue();
// If the upper XLen-16 bits are not used, try to convert this to a simm12
// by sign extending bit 15.
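// For example (illustrative): if Imm = 0xFFF0 and only the low 16 bits are
// used, SignExtend64(Imm, 16) = -16, which fits in a single ADDI instead of a
// LUI/ADDI pair.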
if (isUInt<16>(Imm) && isInt<12>(SignExtend64(Imm, 16)) &&
hasAllHUsers(Node))
Imm = SignExtend64(Imm, 16);
// If the upper 32 bits are not used, try to convert this into a simm32 by
// sign extending bit 31.
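// For example (illustrative): Imm = 0x80000000 is not a simm32, but if all
// users only read the low 32 bits it can be materialized as INT32_MIN with a
// single LUI.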
if (!isInt<32>(Imm) && isUInt<32>(Imm) && hasAllWUsers(Node))
Imm = SignExtend64(Imm, 32);
ReplaceNode(Node, selectImm(CurDAG, DL, VT, Imm, *Subtarget));
return;
}
case ISD::FrameIndex: {
SDValue Imm = CurDAG->getTargetConstant(0, DL, XLenVT);
int FI = cast<FrameIndexSDNode>(Node)->getIndex();
SDValue TFI = CurDAG->getTargetFrameIndex(FI, VT);
ReplaceNode(Node, CurDAG->getMachineNode(RISCV::ADDI, DL, VT, TFI, Imm));
return;
}
case ISD::SRL: {
// Optimize (srl (and X, C2), C) ->
// (srli (slli X, (XLen-C3)), (XLen-C3) + C)
// where C2 is a mask with C3 trailing ones.
// This takes into account that C2 may have had lower bits unset by
// SimplifyDemandedBits, and avoids materializing the C2 immediate.
// This pattern occurs when type legalizing right shifts for types with
// less than XLen bits.
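// Worked example (illustrative, assuming XLen=64 and a single-use and):
// (srl (and X, 0x3fff), 2) has a mask with 14 trailing ones, so it becomes
// (srli (slli X, 50), 52) and the 0x3fff immediate never has to be
// materialized.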
auto *N1C = dyn_cast<ConstantSDNode>(Node->getOperand(1));
if (!N1C)
break;
SDValue N0 = Node->getOperand(0);
if (N0.getOpcode() != ISD::AND || !N0.hasOneUse() ||
!isa<ConstantSDNode>(N0.getOperand(1)))
break;
unsigned ShAmt = N1C->getZExtValue();
uint64_t Mask = N0.getConstantOperandVal(1);
Mask |= maskTrailingOnes<uint64_t>(ShAmt);
if (!isMask_64(Mask))
break;
unsigned TrailingOnes = countTrailingOnes(Mask);
// 32 trailing ones should use srliw via tablegen pattern.
if (TrailingOnes == 32 || ShAmt >= TrailingOnes)
break;
unsigned LShAmt = Subtarget->getXLen() - TrailingOnes;
SDNode *SLLI =
CurDAG->getMachineNode(RISCV::SLLI, DL, VT, N0->getOperand(0),
CurDAG->getTargetConstant(LShAmt, DL, VT));
SDNode *SRLI = CurDAG->getMachineNode(
RISCV::SRLI, DL, VT, SDValue(SLLI, 0),
CurDAG->getTargetConstant(LShAmt + ShAmt, DL, VT));
ReplaceNode(Node, SRLI);
return;
}
case ISD::SRA: {
// Optimize (sra (sext_inreg X, i16), C) ->
// (srai (slli X, (XLen-16)), (XLen-16) + C)
// And (sra (sext_inreg X, i8), C) ->
// (srai (slli X, (XLen-8)), (XLen-8) + C)
// This can occur when Zbb is enabled, which makes sext_inreg i16/i8 legal.
// This transform matches the code we get without Zbb. The shifts are more
// compressible, and this can help expose CSE opportunities in the sdiv by
// constant optimization.
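// Worked example (illustrative, assuming XLen=64 and a single-use
// sext_inreg): (sra (sext_inreg X, i16), 3) becomes (srai (slli X, 48), 51),
// the same shift pair we would emit without Zbb.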
auto *N1C = dyn_cast<ConstantSDNode>(Node->getOperand(1));
if (!N1C)
break;
SDValue N0 = Node->getOperand(0);
if (N0.getOpcode() != ISD::SIGN_EXTEND_INREG || !N0.hasOneUse())
break;
unsigned ShAmt = N1C->getZExtValue();
unsigned ExtSize =
cast<VTSDNode>(N0.getOperand(1))->getVT().getSizeInBits();
// ExtSize of 32 should use sraiw via tablegen pattern.
if (ExtSize >= 32 || ShAmt >= ExtSize)
break;
unsigned LShAmt = Subtarget->getXLen() - ExtSize;
SDNode *SLLI =
CurDAG->getMachineNode(RISCV::SLLI, DL, VT, N0->getOperand(0),
CurDAG->getTargetConstant(LShAmt, DL, VT));
SDNode *SRAI = CurDAG->getMachineNode(
RISCV::SRAI, DL, VT, SDValue(SLLI, 0),
CurDAG->getTargetConstant(LShAmt + ShAmt, DL, VT));
ReplaceNode(Node, SRAI);
return;
}
case ISD::AND: {
auto *N1C = dyn_cast<ConstantSDNode>(Node->getOperand(1));
if (!N1C)
break;
SDValue N0 = Node->getOperand(0);
bool LeftShift = N0.getOpcode() == ISD::SHL;
if (!LeftShift && N0.getOpcode() != ISD::SRL)
break;
auto *C = dyn_cast<ConstantSDNode>(N0.getOperand(1));
if (!C)
break;
uint64_t C2 = C->getZExtValue();
unsigned XLen = Subtarget->getXLen();
if (!C2 || C2 >= XLen)
break;
uint64_t C1 = N1C->getZExtValue();
// Keep track of whether this is an andi, zext.h, or zext.w.
bool ZExtOrANDI = isInt<12>(N1C->getSExtValue());
if (C1 == UINT64_C(0xFFFF) &&
(Subtarget->hasStdExtZbb() || Subtarget->hasStdExtZbp()))
ZExtOrANDI = true;
if (C1 == UINT64_C(0xFFFFFFFF) && Subtarget->hasStdExtZba())
ZExtOrANDI = true;
// Clear irrelevant bits in the mask.
if (LeftShift)
C1 &= maskTrailingZeros<uint64_t>(C2);
else
C1 &= maskTrailingOnes<uint64_t>(XLen - C2);
// Some transforms should only be done if the shift has a single use or
// the AND would become (srli (slli X, 32), 32)
bool OneUseOrZExtW = N0.hasOneUse() || C1 == UINT64_C(0xFFFFFFFF);
SDValue X = N0.getOperand(0);
// Turn (and (srl x, c2), c1) -> (srli (slli x, c3-c2), c3) if c1 is a mask
// with c3 leading zeros.
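// Worked example (illustrative, assuming XLen=64 and a single-use shift):
// (and (srl X, 2), 0x0fffffffffffffff) has c3 = 4 leading zeros and becomes
// (srli (slli X, 2), 4); for (and (srl X, 4), 0x0fffffff) the leading-zero
// count is c2 + 32 = 36, so a single (srliw X, 4) suffices.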
if (!LeftShift && isMask_64(C1)) {
uint64_t C3 = XLen - (64 - countLeadingZeros(C1));
if (C2 < C3) {
// If the number of leading zeros is C2+32 this can be SRLIW.
if (C2 + 32 == C3) {
SDNode *SRLIW =
CurDAG->getMachineNode(RISCV::SRLIW, DL, XLenVT, X,
CurDAG->getTargetConstant(C2, DL, XLenVT));
ReplaceNode(Node, SRLIW);
return;
}
// (and (srl (sexti32 Y), c2), c1) -> (srliw (sraiw Y, 31), c3 - 32) if
// c1 is a mask with c3 leading zeros and c2 >= 32 and c3-c2==1.
//
// This pattern occurs when (i32 (srl (sra Y, 31), c3 - 32)) is type
// legalized and goes through DAG combine.
SDValue Y;
if (C2 >= 32 && (C3 - C2) == 1 && N0.hasOneUse() &&
selectSExti32(X, Y)) {
SDNode *SRAIW =
CurDAG->getMachineNode(RISCV::SRAIW, DL, XLenVT, Y,
CurDAG->getTargetConstant(31, DL, XLenVT));
SDNode *SRLIW = CurDAG->getMachineNode(
RISCV::SRLIW, DL, XLenVT, SDValue(SRAIW, 0),
CurDAG->getTargetConstant(C3 - 32, DL, XLenVT));
ReplaceNode(Node, SRLIW);
return;
}
// (srli (slli x, c3-c2), c3).
if (OneUseOrZExtW && !ZExtOrANDI) {
SDNode *SLLI = CurDAG->getMachineNode(
RISCV::SLLI, DL, XLenVT, X,
CurDAG->getTargetConstant(C3 - C2, DL, XLenVT));
SDNode *SRLI =
CurDAG->getMachineNode(RISCV::SRLI, DL, XLenVT, SDValue(SLLI, 0),
CurDAG->getTargetConstant(C3, DL, XLenVT));
ReplaceNode(Node, SRLI);
return;
}
}
}
// Turn (and (shl x, c2), c1) -> (srli (slli x, c2+c3), c3) if c1 is a mask
// shifted by c2 bits with c3 leading zeros.
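// Worked example (illustrative, assuming XLen=64 and a single-use shift):
// (and (shl X, 4), 0xfffffff0) is a 28-bit mask shifted left by 4, giving
// (srli (slli X, 36), 32); with mask 0xffffffff0 (0xffffffff << 4) and Zba
// this becomes (slli.uw X, 4).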
if (LeftShift && isShiftedMask_64(C1)) {
uint64_t C3 = XLen - (64 - countLeadingZeros(C1));
if (C2 + C3 < XLen &&
C1 == (maskTrailingOnes<uint64_t>(XLen - (C2 + C3)) << C2)) {
// Use slli.uw when possible.
if ((XLen - (C2 + C3)) == 32 && Subtarget->hasStdExtZba()) {
SDNode *SLLIUW =
CurDAG->getMachineNode(RISCV::SLLIUW, DL, XLenVT, X,
CurDAG->getTargetConstant(C2, DL, XLenVT));
ReplaceNode(Node, SLLIUW);
return;
}
// (srli (slli x, c2+c3), c3)
if (OneUseOrZExtW && !ZExtOrANDI) {
SDNode *SLLI = CurDAG->getMachineNode(
RISCV::SLLI, DL, XLenVT, X,
CurDAG->getTargetConstant(C2 + C3, DL, XLenVT));
SDNode *SRLI =
CurDAG->getMachineNode(RISCV::SRLI, DL, XLenVT, SDValue(SLLI, 0),
CurDAG->getTargetConstant(C3, DL, XLenVT));
ReplaceNode(Node, SRLI);
return;
}
}
}
// Turn (and (srl x, c2), c1) -> (slli (srli x, c2+c3), c3) if c1 is a
// shifted mask with c2 leading zeros and c3 trailing zeros.
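// Worked example (illustrative, assuming XLen=64 and a single-use shift):
// (and (srl X, 8), 0x00ffffffffffff00) has 8 leading and 8 trailing zeros,
// so it becomes (slli (srli X, 16), 8).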
if (!LeftShift && isShiftedMask_64(C1)) {
uint64_t Leading = XLen - (64 - countLeadingZeros(C1));
uint64_t C3 = countTrailingZeros(C1);
if (Leading == C2 && C2 + C3 < XLen && OneUseOrZExtW && !ZExtOrANDI) {
SDNode *SRLI = CurDAG->getMachineNode(
RISCV::SRLI, DL, XLenVT, X,
CurDAG->getTargetConstant(C2 + C3, DL, XLenVT));
SDNode *SLLI =
CurDAG->getMachineNode(RISCV::SLLI, DL, XLenVT, SDValue(SRLI, 0),
CurDAG->getTargetConstant(C3, DL, XLenVT));
ReplaceNode(Node, SLLI);
return;
}
// If the leading zero count is C2+32, we can use SRLIW instead of SRLI.
if (Leading > 32 && (Leading - 32) == C2 && C2 + C3 < 32 &&
OneUseOrZExtW && !ZExtOrANDI) {
SDNode *SRLIW = CurDAG->getMachineNode(
RISCV::SRLIW, DL, XLenVT, X,
CurDAG->getTargetConstant(C2 + C3, DL, XLenVT));
SDNode *SLLI =
CurDAG->getMachineNode(RISCV::SLLI, DL, XLenVT, SDValue(SRLIW, 0),
CurDAG->getTargetConstant(C3, DL, XLenVT));
ReplaceNode(Node, SLLI);
return;
}
}
// Turn (and (shl x, c2), c1) -> (slli (srli x, c3-c2), c3) if c1 is a
// shifted mask with no leading zeros and c3 trailing zeros.
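// Worked example (illustrative, assuming XLen=64 and a single-use shift):
// (and (shl X, 4), 0xffffffffffff0000) has no leading zeros and c3 = 16
// trailing zeros, so it becomes (slli (srli X, 12), 16).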
if (LeftShift && isShiftedMask_64(C1)) {
uint64_t Leading = XLen - (64 - countLeadingZeros(C1));
uint64_t C3 = countTrailingZeros(C1);
if (Leading == 0 && C2 < C3 && OneUseOrZExtW && !ZExtOrANDI) {
SDNode *SRLI = CurDAG->getMachineNode(
RISCV::SRLI, DL, XLenVT, X,
CurDAG->getTargetConstant(C3 - C2, DL, XLenVT));
SDNode *SLLI =
CurDAG->getMachineNode(RISCV::SLLI, DL, XLenVT, SDValue(SRLI, 0),
CurDAG->getTargetConstant(C3, DL, XLenVT));
ReplaceNode(Node, SLLI);
return;
}
// If we have (32-C2) leading zeros, we can use SRLIW instead of SRLI.
if (C2 < C3 && Leading + C2 == 32 && OneUseOrZExtW && !ZExtOrANDI) {
SDNode *SRLIW = CurDAG->getMachineNode(
RISCV::SRLIW, DL, XLenVT, X,
CurDAG->getTargetConstant(C3 - C2, DL, XLenVT));
SDNode *SLLI =
CurDAG->getMachineNode(RISCV::SLLI, DL, XLenVT, SDValue(SRLIW, 0),
CurDAG->getTargetConstant(C3, DL, XLenVT));
ReplaceNode(Node, SLLI);
return;
}
}
break;
}
case ISD::MUL: {
// Special case for calculating (mul (and X, C2), C1) where the full product
// fits in XLen bits. We can shift X left by the number of leading zeros in
// C2 and shift C1 left by XLen-lzcnt(C2). This ensures the 2*XLen-bit
// product has XLen trailing zeros, so the desired result lands in the output
// of MULHU. This can avoid materializing a constant in a register for C2.
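// Worked example (illustrative, assuming XLen=64):
// (mul (and X, 0x3ffffffff), 5) has a 34-bit mask, so lzcnt(C2) = 30 and we
// form (mulhu (slli X, 30), 5 << 34); the slli discards exactly the bits the
// AND would have cleared, the shifts total 64, and MULHU returns the high
// half, which is (X & C2) * 5 as long as that product fits in 64 bits.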
// RHS should be a constant.
auto *N1C = dyn_cast<ConstantSDNode>(Node->getOperand(1));
if (!N1C || !N1C->hasOneUse())
break;
// LHS should be an AND with constant.
SDValue N0 = Node->getOperand(0);
if (N0.getOpcode() != ISD::AND || !isa<ConstantSDNode>(N0.getOperand(1)))
break;
uint64_t C2 = cast<ConstantSDNode>(N0.getOperand(1))->getZExtValue();
// Constant should be a mask.
if (!isMask_64(C2))
break;
// This should be the only use of the AND unless we will use
// (SRLI (SLLI X, 32), 32). We don't use a shift pair for other AND
// constants.
if (!N0.hasOneUse() && C2 != UINT64_C(0xFFFFFFFF))
break;
// If this can be an ANDI, ZEXT.H or ZEXT.W we don't need to do this
// optimization.
if (isInt<12>(C2) ||
(C2 == UINT64_C(0xFFFF) &&
(Subtarget->hasStdExtZbb() || Subtarget->hasStdExtZbp())) ||
(C2 == UINT64_C(0xFFFFFFFF) && Subtarget->hasStdExtZba()))
break;
// We need to shift left the AND input and C1 by a total of XLen bits.
// How far left do we need to shift the AND input?
unsigned XLen = Subtarget->getXLen();
unsigned LeadingZeros = XLen - (64 - countLeadingZeros(C2));
// The constant gets shifted by the remaining amount unless that would
// shift bits out.
uint64_t C1 = N1C->getZExtValue();
unsigned ConstantShift = XLen - LeadingZeros;
if (ConstantShift > (XLen - (64 - countLeadingZeros(C1))))
break;
uint64_t ShiftedC1 = C1 << ConstantShift;
// If this is RV32, we need to sign extend the constant.
if (XLen == 32)
ShiftedC1 = SignExtend64(ShiftedC1, 32);
// Create (mulhu (slli X, lzcnt(C2)), C1 << (XLen - lzcnt(C2))).
SDNode *Imm = selectImm(CurDAG, DL, VT, ShiftedC1, *Subtarget);
SDNode *SLLI =
CurDAG->getMachineNode(RISCV::SLLI, DL, VT, N0.getOperand(0),
CurDAG->getTargetConstant(LeadingZeros, DL, VT));
SDNode *MULHU = CurDAG->getMachineNode(RISCV::MULHU, DL, VT,
SDValue(SLLI, 0), SDValue(Imm, 0));
ReplaceNode(Node, MULHU);
return;
}
case ISD::INTRINSIC_WO_CHAIN: {
unsigned IntNo = Node->getConstantOperandVal(0);
switch (IntNo) {
// By default we do not custom select any intrinsic.
default:
break;
case Intrinsic::riscv_vmsgeu:
case Intrinsic::riscv_vmsge: {
SDValue Src1 = Node->getOperand(1);
SDValue Src2 = Node->getOperand(2);
// Only custom select scalar second operand.
if (Src2.getValueType() != XLenVT)
break;
// Small constants are handled with patterns.
if (auto *C = dyn_cast<ConstantSDNode>(Src2)) {
int64_t CVal = C->getSExtValue();
if (CVal >= -15 && CVal <= 16)
break;
}
bool IsUnsigned = IntNo == Intrinsic::riscv_vmsgeu;
MVT Src1VT = Src1.getSimpleValueType();
unsigned VMSLTOpcode, VMNANDOpcode;
switch (RISCVTargetLowering::getLMUL(Src1VT)) {
default:
llvm_unreachable("Unexpected LMUL!");
#define CASE_VMSLT_VMNAND_OPCODES(lmulenum, suffix) \
case RISCVII::VLMUL::lmulenum: \
VMSLTOpcode = IsUnsigned ? RISCV::PseudoVMSLTU_VX_##suffix \
: RISCV::PseudoVMSLT_VX_##suffix; \
VMNANDOpcode = RISCV::PseudoVMNAND_MM_##suffix; \
break;
CASE_VMSLT_VMNAND_OPCODES(LMUL_F8, MF8)
CASE_VMSLT_VMNAND_OPCODES(LMUL_F4, MF4)
CASE_VMSLT_VMNAND_OPCODES(LMUL_F2, MF2)
CASE_VMSLT_VMNAND_OPCODES(LMUL_1, M1)
CASE_VMSLT_VMNAND_OPCODES(LMUL_2, M2)
CASE_VMSLT_VMNAND_OPCODES(LMUL_4, M4)
CASE_VMSLT_VMNAND_OPCODES(LMUL_8, M8)
#undef CASE_VMSLT_VMNAND_OPCODES
}
SDValue SEW = CurDAG->getTargetConstant(
Log2_32(Src1VT.getScalarSizeInBits()), DL, XLenVT);
SDValue VL;
selectVLOp(Node->getOperand(3), VL);
// Expand to
// vmslt{u}.vx vd, va, x; vmnand.mm vd, vd, vd
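// (vmsge{u} is the complement of vmslt{u}, and vmnand of the compare with
// itself computes exactly that complement.)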
SDValue Cmp = SDValue(
CurDAG->getMachineNode(VMSLTOpcode, DL, VT, {Src1, Src2, VL, SEW}),
0);
ReplaceNode(Node, CurDAG->getMachineNode(VMNANDOpcode, DL, VT,
{Cmp, Cmp, VL, SEW}));
return;
}
case Intrinsic::riscv_vmsgeu_mask:
case Intrinsic::riscv_vmsge_mask: {
SDValue Src1 = Node->getOperand(2);
SDValue Src2 = Node->getOperand(3);
// Only custom select scalar second operand.
if (Src2.getValueType() != XLenVT)
break;
// Small constants are handled with patterns.
if (auto *C = dyn_cast<ConstantSDNode>(Src2)) {
int64_t CVal = C->getSExtValue();
if (CVal >= -15 && CVal <= 16)
break;
}
bool IsUnsigned = IntNo == Intrinsic::riscv_vmsgeu_mask;
MVT Src1VT = Src1.getSimpleValueType();
unsigned VMSLTOpcode, VMSLTMaskOpcode, VMXOROpcode, VMANDNOpcode;
switch (RISCVTargetLowering::getLMUL(Src1VT)) {
default:
llvm_unreachable("Unexpected LMUL!");
#define CASE_VMSLT_OPCODES(lmulenum, suffix) \
case RISCVII::VLMUL::lmulenum: \
VMSLTOpcode = IsUnsigned ? RISCV::PseudoVMSLTU_VX_##suffix \
: RISCV::PseudoVMSLT_VX_##suffix; \
VMSLTMaskOpcode = IsUnsigned ? RISCV::PseudoVMSLTU_VX_##suffix##_MASK \
: RISCV::PseudoVMSLT_VX_##suffix##_MASK; \
break;
CASE_VMSLT_OPCODES(LMUL_F8, MF8)
CASE_VMSLT_OPCODES(LMUL_F4, MF4)
CASE_VMSLT_OPCODES(LMUL_F2, MF2)
CASE_VMSLT_OPCODES(LMUL_1, M1)
CASE_VMSLT_OPCODES(LMUL_2, M2)
CASE_VMSLT_OPCODES(LMUL_4, M4)
CASE_VMSLT_OPCODES(LMUL_8, M8)
#undef CASE_VMSLT_OPCODES
}
// Mask operations use the LMUL from the mask type.
switch (RISCVTargetLowering::getLMUL(VT)) {
default:
llvm_unreachable("Unexpected LMUL!");
#define CASE_VMXOR_VANDN_OPCODES(lmulenum, suffix) \
case RISCVII::VLMUL::lmulenum: \
VMXOROpcode = RISCV::PseudoVMXOR_MM_##suffix; \
VMANDNOpcode = RISCV::PseudoVMANDN_MM_##suffix; \
break;
CASE_VMXOR_VANDN_OPCODES(LMUL_F8, MF8)
CASE_VMXOR_VANDN_OPCODES(LMUL_F4, MF4)
CASE_VMXOR_VANDN_OPCODES(LMUL_F2, MF2)
CASE_VMXOR_VANDN_OPCODES(LMUL_1, M1)
CASE_VMXOR_VANDN_OPCODES(LMUL_2, M2)
CASE_VMXOR_VANDN_OPCODES(LMUL_4, M4)
CASE_VMXOR_VANDN_OPCODES(LMUL_8, M8)
#undef CASE_VMXOR_VANDN_OPCODES
}
SDValue SEW = CurDAG->getTargetConstant(
Log2_32(Src1VT.getScalarSizeInBits()), DL, XLenVT);
SDValue MaskSEW = CurDAG->getTargetConstant(0, DL, XLenVT);
SDValue VL;
selectVLOp(Node->getOperand(5), VL);
SDValue MaskedOff = Node->getOperand(1);
SDValue Mask = Node->getOperand(4);
// If the MaskedOff value and the Mask are the same value, use
// vmslt{u}.vx vt, va, x; vmandn.mm vd, vd, vt
// This avoids needing to copy v0 to vd before starting the next sequence.
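// (In that asm form, vmandn computes vd & ~vt, so active lanes get
// !(va < x) = (va >= x) and inactive lanes get 0, which here equals the
// MaskedOff value.)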
if (Mask == MaskedOff) {
SDValue Cmp = SDValue(
CurDAG->getMachineNode(VMSLTOpcode, DL, VT, {Src1, Src2, VL, SEW}),
0);
ReplaceNode(Node, CurDAG->getMachineNode(VMANDNOpcode, DL, VT,
{Mask, Cmp, VL, MaskSEW}));
return;
}
// Mask needs to be copied to V0.
SDValue Chain = CurDAG->getCopyToReg(CurDAG->getEntryNode(), DL,
RISCV::V0, Mask, SDValue());
SDValue Glue = Chain.getValue(1);
SDValue V0 = CurDAG->getRegister(RISCV::V0, VT);
// Otherwise use
// vmslt{u}.vx vd, va, x, v0.t; vmxor.mm vd, vd, v0
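// (The masked compare leaves inactive lanes as MaskedOff and writes (va < x)
// to active lanes; xor-ing with the mask then flips only the active lanes,
// giving (va >= x) there while preserving MaskedOff elsewhere.)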
SDValue Cmp = SDValue(
CurDAG->getMachineNode(VMSLTMaskOpcode, DL, VT,
{MaskedOff, Src1, Src2, V0, VL, SEW, Glue}),
0);
ReplaceNode(Node, CurDAG->getMachineNode(VMXOROpcode, DL, VT,
{Cmp, Mask, VL, MaskSEW}));
return;
}
}
break;
}
case ISD::INTRINSIC_W_CHAIN: {
unsigned IntNo = cast<ConstantSDNode>(Node->getOperand(1))->getZExtValue();
switch (IntNo) {
// By default we do not custom select any intrinsic.
default:
break;
case Intrinsic::riscv_vsetvli:
case Intrinsic::riscv_vsetvlimax: {
if (!Subtarget->hasVInstructions())
break;
bool VLMax = IntNo == Intrinsic::riscv_vsetvlimax;
unsigned Offset = VLMax ? 2 : 3;
assert(Node->getNumOperands() == Offset + 2 &&
"Unexpected number of operands");
unsigned SEW =
RISCVVType::decodeVSEW(Node->getConstantOperandVal(Offset) & 0x7);
RISCVII::VLMUL VLMul = static_cast<RISCVII::VLMUL>(
Node->getConstantOperandVal(Offset + 1) & 0x7);
unsigned VTypeI = RISCVVType::encodeVTYPE(
VLMul, SEW, /*TailAgnostic*/ true, /*MaskAgnostic*/ false);
SDValue VTypeIOp = CurDAG->getTargetConstant(VTypeI, DL, XLenVT);
SDValue VLOperand;
unsigned Opcode = RISCV::PseudoVSETVLI;
if (VLMax) {
VLOperand = CurDAG->getRegister(RISCV::X0, XLenVT);
Opcode = RISCV::PseudoVSETVLIX0;
} else {
VLOperand = Node->getOperand(2);
if (auto *C = dyn_cast<ConstantSDNode>(VLOperand)) {
uint64_t AVL = C->getZExtValue();
if (isUInt<5>(AVL)) {
SDValue VLImm = CurDAG->getTargetConstant(AVL, DL, XLenVT);
ReplaceNode(
Node, CurDAG->getMachineNode(RISCV::PseudoVSETIVLI, DL, XLenVT,
MVT::Other, VLImm, VTypeIOp,
/* Chain */ Node->getOperand(0)));
return;
}
}
}
ReplaceNode(Node,
CurDAG->getMachineNode(Opcode, DL, XLenVT,
MVT::Other, VLOperand, VTypeIOp,
/* Chain */ Node->getOperand(0)));
return;
}
case Intrinsic::riscv_vlseg2:
case Intrinsic::riscv_vlseg3:
case Intrinsic::riscv_vlseg4:
case Intrinsic::riscv_vlseg5:
case Intrinsic::riscv_vlseg6:
case Intrinsic::riscv_vlseg7:
case Intrinsic::riscv_vlseg8: {
selectVLSEG(Node, /*IsMasked*/ false, /*IsStrided*/ false);
return;
}
case Intrinsic::riscv_vlseg2_mask:
case Intrinsic::riscv_vlseg3_mask:
case Intrinsic::riscv_vlseg4_mask:
case Intrinsic::riscv_vlseg5_mask:
case Intrinsic::riscv_vlseg6_mask:
case Intrinsic::riscv_vlseg7_mask:
case Intrinsic::riscv_vlseg8_mask: {
selectVLSEG(Node, /*IsMasked*/ true, /*IsStrided*/ false);
return;
}
case Intrinsic::riscv_vlsseg2:
case Intrinsic::riscv_vlsseg3:
case Intrinsic::riscv_vlsseg4:
case Intrinsic::riscv_vlsseg5:
case Intrinsic::riscv_vlsseg6:
case Intrinsic::riscv_vlsseg7:
case Intrinsic::riscv_vlsseg8: {
selectVLSEG(Node, /*IsMasked*/ false, /*IsStrided*/ true);
return;
}
case Intrinsic::riscv_vlsseg2_mask:
case Intrinsic::riscv_vlsseg3_mask:
case Intrinsic::riscv_vlsseg4_mask:
case Intrinsic::riscv_vlsseg5_mask:
case Intrinsic::riscv_vlsseg6_mask:
case Intrinsic::riscv_vlsseg7_mask:
case Intrinsic::riscv_vlsseg8_mask: {
selectVLSEG(Node, /*IsMasked*/ true, /*IsStrided*/ true);
return;
}
case Intrinsic::riscv_vloxseg2:
case Intrinsic::riscv_vloxseg3:
case Intrinsic::riscv_vloxseg4:
case Intrinsic::riscv_vloxseg5:
case Intrinsic::riscv_vloxseg6:
case Intrinsic::riscv_vloxseg7:
case Intrinsic::riscv_vloxseg8:
selectVLXSEG(Node, /*IsMasked*/ false, /*IsOrdered*/ true);
return;
case Intrinsic::riscv_vluxseg2:
case Intrinsic::riscv_vluxseg3:
case Intrinsic::riscv_vluxseg4:
case Intrinsic::riscv_vluxseg5:
case Intrinsic::riscv_vluxseg6:
case Intrinsic::riscv_vluxseg7:
case Intrinsic::riscv_vluxseg8:
selectVLXSEG(Node, /*IsMasked*/ false, /*IsOrdered*/ false);
return;
case Intrinsic::riscv_vloxseg2_mask:
case Intrinsic::riscv_vloxseg3_mask:
case Intrinsic::riscv_vloxseg4_mask:
case Intrinsic::riscv_vloxseg5_mask:
case Intrinsic::riscv_vloxseg6_mask:
case Intrinsic::riscv_vloxseg7_mask:
case Intrinsic::riscv_vloxseg8_mask:
selectVLXSEG(Node, /*IsMasked*/ true, /*IsOrdered*/ true);
return;
case Intrinsic::riscv_vluxseg2_mask:
case Intrinsic::riscv_vluxseg3_mask:
case Intrinsic::riscv_vluxseg4_mask:
case Intrinsic::riscv_vluxseg5_mask:
case Intrinsic::riscv_vluxseg6_mask:
case Intrinsic::riscv_vluxseg7_mask:
case Intrinsic::riscv_vluxseg8_mask:
selectVLXSEG(Node, /*IsMasked*/ true, /*IsOrdered*/ false);
return;
case Intrinsic::riscv_vlseg8ff:
case Intrinsic::riscv_vlseg7ff:
case Intrinsic::riscv_vlseg6ff:
case Intrinsic::riscv_vlseg5ff:
case Intrinsic::riscv_vlseg4ff:
case Intrinsic::riscv_vlseg3ff:
case Intrinsic::riscv_vlseg2ff: {
selectVLSEGFF(Node, /*IsMasked*/ false);
return;
}
case Intrinsic::riscv_vlseg8ff_mask:
case Intrinsic::riscv_vlseg7ff_mask:
case Intrinsic::riscv_vlseg6ff_mask:
case Intrinsic::riscv_vlseg5ff_mask:
case Intrinsic::riscv_vlseg4ff_mask:
case Intrinsic::riscv_vlseg3ff_mask:
case Intrinsic::riscv_vlseg2ff_mask: {
selectVLSEGFF(Node, /*IsMasked*/ true);
return;
}
case Intrinsic::riscv_vloxei:
case Intrinsic::riscv_vloxei_mask:
case Intrinsic::riscv_vluxei:
case Intrinsic::riscv_vluxei_mask: {
bool IsMasked = IntNo == Intrinsic::riscv_vloxei_mask ||
IntNo == Intrinsic::riscv_vluxei_mask;
bool IsOrdered = IntNo == Intrinsic::riscv_vloxei ||
IntNo == Intrinsic::riscv_vloxei_mask;
MVT VT = Node->getSimpleValueType(0);
unsigned Log2SEW = Log2_32(VT.getScalarSizeInBits());
unsigned CurOp = 2;
SmallVector<SDValue, 8> Operands;
if (IsMasked)
Operands.push_back(Node->getOperand(CurOp++));
MVT IndexVT;
addVectorLoadStoreOperands(Node, Log2SEW, DL, CurOp, IsMasked,
/*IsStridedOrIndexed*/ true, Operands,
/*IsLoad=*/true, &IndexVT);
assert(VT.getVectorElementCount() == IndexVT.getVectorElementCount() &&
"Element count mismatch");
RISCVII::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT);
RISCVII::VLMUL IndexLMUL = RISCVTargetLowering::getLMUL(IndexVT);
unsigned IndexLog2EEW = Log2_32(IndexVT.getScalarSizeInBits());
if (IndexLog2EEW == 6 && !Subtarget->is64Bit()) {
report_fatal_error("The V extension does not support EEW=64 for index "
"values when XLEN=32");
}
const RISCV::VLX_VSXPseudo *P = RISCV::getVLXPseudo(
IsMasked, IsOrdered, IndexLog2EEW, static_cast<unsigned>(LMUL),
static_cast<unsigned>(IndexLMUL));
MachineSDNode *Load =
CurDAG->getMachineNode(P->Pseudo, DL, Node->getVTList(), Operands);
if (auto *MemOp = dyn_cast<MemSDNode>(Node))
CurDAG->setNodeMemRefs(Load, {MemOp->getMemOperand()});
ReplaceNode(Node, Load);
return;
}
case Intrinsic::riscv_vlm:
case Intrinsic::riscv_vle:
case Intrinsic::riscv_vle_mask:
case Intrinsic::riscv_vlse:
case Intrinsic::riscv_vlse_mask: {
bool IsMasked = IntNo == Intrinsic::riscv_vle_mask ||
IntNo == Intrinsic::riscv_vlse_mask;
bool IsStrided =
IntNo == Intrinsic::riscv_vlse || IntNo == Intrinsic::riscv_vlse_mask;
MVT VT = Node->getSimpleValueType(0);
unsigned Log2SEW = Log2_32(VT.getScalarSizeInBits());
unsigned CurOp = 2;
SmallVector<SDValue, 8> Operands;
if (IsMasked)
Operands.push_back(Node->getOperand(CurOp++));
addVectorLoadStoreOperands(Node, Log2SEW, DL, CurOp, IsMasked, IsStrided,
Operands, /*IsLoad=*/true);
RISCVII::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT);
const RISCV::VLEPseudo *P =
RISCV::getVLEPseudo(IsMasked, IsStrided, /*FF*/ false, Log2SEW,
static_cast<unsigned>(LMUL));
MachineSDNode *Load =
CurDAG->getMachineNode(P->Pseudo, DL, Node->getVTList(), Operands);
if (auto *MemOp = dyn_cast<MemSDNode>(Node))
CurDAG->setNodeMemRefs(Load, {MemOp->getMemOperand()});
ReplaceNode(Node, Load);
return;
}
case Intrinsic::riscv_vleff:
case Intrinsic::riscv_vleff_mask: {
bool IsMasked = IntNo == Intrinsic::riscv_vleff_mask;
MVT VT = Node->getSimpleValueType(0);
unsigned Log2SEW = Log2_32(VT.getScalarSizeInBits());
unsigned CurOp = 2;
SmallVector<SDValue, 7> Operands;
if (IsMasked)
Operands.push_back(Node->getOperand(CurOp++));
addVectorLoadStoreOperands(Node, Log2SEW, DL, CurOp, IsMasked,
/*IsStridedOrIndexed*/ false, Operands,
/*IsLoad=*/true);
RISCVII::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT);
const RISCV::VLEPseudo *P =
RISCV::getVLEPseudo(IsMasked, /*Strided*/ false, /*FF*/ true, Log2SEW,
static_cast<unsigned>(LMUL));
MachineSDNode *Load =
CurDAG->getMachineNode(P->Pseudo, DL, Node->getValueType(0),
MVT::Other, MVT::Glue, Operands);
SDNode *ReadVL = CurDAG->getMachineNode(RISCV::PseudoReadVL, DL, XLenVT,
/*Glue*/ SDValue(Load, 2));
if (auto *MemOp = dyn_cast<MemSDNode>(Node))
CurDAG->setNodeMemRefs(Load, {MemOp->getMemOperand()});
ReplaceUses(SDValue(Node, 0), SDValue(Load, 0));
ReplaceUses(SDValue(Node, 1), SDValue(ReadVL, 0)); // VL
ReplaceUses(SDValue(Node, 2), SDValue(Load, 1)); // Chain
CurDAG->RemoveDeadNode(Node);
return;
}
}
break;
}
case ISD::INTRINSIC_VOID: {
unsigned IntNo = cast<ConstantSDNode>(Node->getOperand(1))->getZExtValue();
switch (IntNo) {
case Intrinsic::riscv_vsseg2:
case Intrinsic::riscv_vsseg3:
case Intrinsic::riscv_vsseg4:
case Intrinsic::riscv_vsseg5:
case Intrinsic::riscv_vsseg6:
case Intrinsic::riscv_vsseg7:
case Intrinsic::riscv_vsseg8: {
selectVSSEG(Node, /*IsMasked*/ false, /*IsStrided*/ false);
return;
}
case Intrinsic::riscv_vsseg2_mask:
case Intrinsic::riscv_vsseg3_mask:
case Intrinsic::riscv_vsseg4_mask:
case Intrinsic::riscv_vsseg5_mask:
case Intrinsic::riscv_vsseg6_mask:
case Intrinsic::riscv_vsseg7_mask:
case Intrinsic::riscv_vsseg8_mask: {
selectVSSEG(Node, /*IsMasked*/ true, /*IsStrided*/ false);
return;
}
case Intrinsic::riscv_vssseg2:
case Intrinsic::riscv_vssseg3:
case Intrinsic::riscv_vssseg4:
case Intrinsic::riscv_vssseg5:
case Intrinsic::riscv_vssseg6:
case Intrinsic::riscv_vssseg7:
case Intrinsic::riscv_vssseg8: {
selectVSSEG(Node, /*IsMasked*/ false, /*IsStrided*/ true);
return;
}
case Intrinsic::riscv_vssseg2_mask:
case Intrinsic::riscv_vssseg3_mask:
case Intrinsic::riscv_vssseg4_mask:
case Intrinsic::riscv_vssseg5_mask:
case Intrinsic::riscv_vssseg6_mask:
case Intrinsic::riscv_vssseg7_mask:
case Intrinsic::riscv_vssseg8_mask: {
selectVSSEG(Node, /*IsMasked*/ true, /*IsStrided*/ true);
return;
}
case Intrinsic::riscv_vsoxseg2:
case Intrinsic::riscv_vsoxseg3:
case Intrinsic::riscv_vsoxseg4:
case Intrinsic::riscv_vsoxseg5:
case Intrinsic::riscv_vsoxseg6:
case Intrinsic::riscv_vsoxseg7:
case Intrinsic::riscv_vsoxseg8:
selectVSXSEG(Node, /*IsMasked*/ false, /*IsOrdered*/ true);
return;
case Intrinsic::riscv_vsuxseg2:
case Intrinsic::riscv_vsuxseg3:
case Intrinsic::riscv_vsuxseg4:
case Intrinsic::riscv_vsuxseg5:
case Intrinsic::riscv_vsuxseg6:
case Intrinsic::riscv_vsuxseg7:
case Intrinsic::riscv_vsuxseg8:
selectVSXSEG(Node, /*IsMasked*/ false, /*IsOrdered*/ false);
return;
case Intrinsic::riscv_vsoxseg2_mask:
case Intrinsic::riscv_vsoxseg3_mask:
case Intrinsic::riscv_vsoxseg4_mask:
case Intrinsic::riscv_vsoxseg5_mask:
case Intrinsic::riscv_vsoxseg6_mask:
case Intrinsic::riscv_vsoxseg7_mask:
case Intrinsic::riscv_vsoxseg8_mask:
selectVSXSEG(Node, /*IsMasked*/ true, /*IsOrdered*/ true);
return;
case Intrinsic::riscv_vsuxseg2_mask:
case Intrinsic::riscv_vsuxseg3_mask:
case Intrinsic::riscv_vsuxseg4_mask:
case Intrinsic::riscv_vsuxseg5_mask:
case Intrinsic::riscv_vsuxseg6_mask:
case Intrinsic::riscv_vsuxseg7_mask:
case Intrinsic::riscv_vsuxseg8_mask:
selectVSXSEG(Node, /*IsMasked*/ true, /*IsOrdered*/ false);
return;
case Intrinsic::riscv_vsoxei:
case Intrinsic::riscv_vsoxei_mask:
case Intrinsic::riscv_vsuxei:
case Intrinsic::riscv_vsuxei_mask: {
bool IsMasked = IntNo == Intrinsic::riscv_vsoxei_mask ||
IntNo == Intrinsic::riscv_vsuxei_mask;
bool IsOrdered = IntNo == Intrinsic::riscv_vsoxei ||
IntNo == Intrinsic::riscv_vsoxei_mask;
MVT VT = Node->getOperand(2)->getSimpleValueType(0);
unsigned Log2SEW = Log2_32(VT.getScalarSizeInBits());
unsigned CurOp = 2;
SmallVector<SDValue, 8> Operands;
Operands.push_back(Node->getOperand(CurOp++)); // Store value.
MVT IndexVT;
addVectorLoadStoreOperands(Node, Log2SEW, DL, CurOp, IsMasked,
/*IsStridedOrIndexed*/ true, Operands,
/*IsLoad=*/false, &IndexVT);
assert(VT.getVectorElementCount() == IndexVT.getVectorElementCount() &&
"Element count mismatch");
RISCVII::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT);
RISCVII::VLMUL IndexLMUL = RISCVTargetLowering::getLMUL(IndexVT);
unsigned IndexLog2EEW = Log2_32(IndexVT.getScalarSizeInBits());
if (IndexLog2EEW == 6 && !Subtarget->is64Bit()) {
report_fatal_error("The V extension does not support EEW=64 for index "
"values when XLEN=32");
}
const RISCV::VLX_VSXPseudo *P = RISCV::getVSXPseudo(
IsMasked, IsOrdered, IndexLog2EEW, static_cast<unsigned>(LMUL),
static_cast<unsigned>(IndexLMUL));
MachineSDNode *Store =
CurDAG->getMachineNode(P->Pseudo, DL, Node->getVTList(), Operands);
if (auto *MemOp = dyn_cast<MemSDNode>(Node))
CurDAG->setNodeMemRefs(Store, {MemOp->getMemOperand()});
ReplaceNode(Node, Store);
return;
}
case Intrinsic::riscv_vsm:
case Intrinsic::riscv_vse:
case Intrinsic::riscv_vse_mask:
case Intrinsic::riscv_vsse:
case Intrinsic::riscv_vsse_mask: {
bool IsMasked = IntNo == Intrinsic::riscv_vse_mask ||
IntNo == Intrinsic::riscv_vsse_mask;
bool IsStrided =
IntNo == Intrinsic::riscv_vsse || IntNo == Intrinsic::riscv_vsse_mask;
MVT VT = Node->getOperand(2)->getSimpleValueType(0);
unsigned Log2SEW = Log2_32(VT.getScalarSizeInBits());
unsigned CurOp = 2;
SmallVector<SDValue, 8> Operands;
Operands.push_back(Node->getOperand(CurOp++)); // Store value.
addVectorLoadStoreOperands(Node, Log2SEW, DL, CurOp, IsMasked, IsStrided,
Operands);
RISCVII::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT);
const RISCV::VSEPseudo *P = RISCV::getVSEPseudo(
IsMasked, IsStrided, Log2SEW, static_cast<unsigned>(LMUL));
MachineSDNode *Store =
CurDAG->getMachineNode(P->Pseudo, DL, Node->getVTList(), Operands);
if (auto *MemOp = dyn_cast<MemSDNode>(Node))
CurDAG->setNodeMemRefs(Store, {MemOp->getMemOperand()});
ReplaceNode(Node, Store);
return;
}
}
break;
}
case ISD::BITCAST: {
MVT SrcVT = Node->getOperand(0).getSimpleValueType();
// Just drop bitcasts between vectors if both are fixed or both are
// scalable.
if ((VT.isScalableVector() && SrcVT.isScalableVector()) ||
(VT.isFixedLengthVector() && SrcVT.isFixedLengthVector())) {
ReplaceUses(SDValue(Node, 0), Node->getOperand(0));
CurDAG->RemoveDeadNode(Node);
return;
}
break;
}
case ISD::INSERT_SUBVECTOR: {
SDValue V = Node->getOperand(0);
SDValue SubV = Node->getOperand(1);
SDLoc DL(SubV);
auto Idx = Node->getConstantOperandVal(2);
MVT SubVecVT = SubV.getSimpleValueType();
const RISCVTargetLowering &TLI = *Subtarget->getTargetLowering();
MVT SubVecContainerVT = SubVecVT;
// Establish the correct scalable-vector types for any fixed-length type.
if (SubVecVT.isFixedLengthVector())
SubVecContainerVT = TLI.getContainerForFixedLengthVector(SubVecVT);
if (VT.isFixedLengthVector())
VT = TLI.getContainerForFixedLengthVector(VT);
const auto *TRI = Subtarget->getRegisterInfo();
unsigned SubRegIdx;
std::tie(SubRegIdx, Idx) =
RISCVTargetLowering::decomposeSubvectorInsertExtractToSubRegs(
VT, SubVecContainerVT, Idx, TRI);
// If the Idx hasn't been completely eliminated then this is a subvector
// insert which doesn't naturally align to a vector register. These must
// be handled using instructions to manipulate the vector registers.
if (Idx != 0)
break;
RISCVII::VLMUL SubVecLMUL = RISCVTargetLowering::getLMUL(SubVecContainerVT);
bool IsSubVecPartReg = SubVecLMUL == RISCVII::VLMUL::LMUL_F2 ||
SubVecLMUL == RISCVII::VLMUL::LMUL_F4 ||
SubVecLMUL == RISCVII::VLMUL::LMUL_F8;
(void)IsSubVecPartReg; // Silence unused variable warning without asserts.
assert((!IsSubVecPartReg || V.isUndef()) &&
"Expecting lowering to have created legal INSERT_SUBVECTORs when "
"the subvector is smaller than a full-sized register");
// If we haven't set a SubRegIdx, then we must be going between
// equally-sized LMUL groups (e.g. VR -> VR). This can be done as a copy.
if (SubRegIdx == RISCV::NoSubRegister) {
unsigned InRegClassID = RISCVTargetLowering::getRegClassIDForVecVT(VT);
assert(RISCVTargetLowering::getRegClassIDForVecVT(SubVecContainerVT) ==
InRegClassID &&
"Unexpected subvector extraction");
SDValue RC = CurDAG->getTargetConstant(InRegClassID, DL, XLenVT);
SDNode *NewNode = CurDAG->getMachineNode(TargetOpcode::COPY_TO_REGCLASS,
DL, VT, SubV, RC);
ReplaceNode(Node, NewNode);
return;
}
SDValue Insert = CurDAG->getTargetInsertSubreg(SubRegIdx, DL, VT, V, SubV);
ReplaceNode(Node, Insert.getNode());
return;
}
case ISD::EXTRACT_SUBVECTOR: {
SDValue V = Node->getOperand(0);
auto Idx = Node->getConstantOperandVal(1);
MVT InVT = V.getSimpleValueType();
SDLoc DL(V);
const RISCVTargetLowering &TLI = *Subtarget->getTargetLowering();
MVT SubVecContainerVT = VT;
// Establish the correct scalable-vector types for any fixed-length type.
if (VT.isFixedLengthVector())
SubVecContainerVT = TLI.getContainerForFixedLengthVector(VT);
if (InVT.isFixedLengthVector())
InVT = TLI.getContainerForFixedLengthVector(InVT);
const auto *TRI = Subtarget->getRegisterInfo();
unsigned SubRegIdx;
std::tie(SubRegIdx, Idx) =
RISCVTargetLowering::decomposeSubvectorInsertExtractToSubRegs(
InVT, SubVecContainerVT, Idx, TRI);
// If the Idx hasn't been completely eliminated then this is a subvector
// extract which doesn't naturally align to a vector register. These must
// be handled using instructions to manipulate the vector registers.
if (Idx != 0)
break;
// If we haven't set a SubRegIdx, then we must be going between
// equally-sized LMUL types (e.g. VR -> VR). This can be done as a copy.
if (SubRegIdx == RISCV::NoSubRegister) {
unsigned InRegClassID = RISCVTargetLowering::getRegClassIDForVecVT(InVT);
assert(RISCVTargetLowering::getRegClassIDForVecVT(SubVecContainerVT) ==
InRegClassID &&
"Unexpected subvector extraction");
SDValue RC = CurDAG->getTargetConstant(InRegClassID, DL, XLenVT);
SDNode *NewNode =
CurDAG->getMachineNode(TargetOpcode::COPY_TO_REGCLASS, DL, VT, V, RC);
ReplaceNode(Node, NewNode);
return;
}
SDValue Extract = CurDAG->getTargetExtractSubreg(SubRegIdx, DL, VT, V);
ReplaceNode(Node, Extract.getNode());
return;
}
case ISD::SPLAT_VECTOR:
case RISCVISD::VMV_V_X_VL:
case RISCVISD::VFMV_V_F_VL: {
// Try to match splat of a scalar load to a strided load with stride of x0.
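// (With rs2 = x0 the stride is zero, so every element is loaded from the same
// address and the result is the splatted scalar; the V spec allows an
// implementation to perform fewer memory accesses than elements in this case.)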
SDValue Src = Node->getOperand(0);
auto *Ld = dyn_cast<LoadSDNode>(Src);
if (!Ld)
break;
EVT MemVT = Ld->getMemoryVT();
// The memory VT should be the same size as the element type.
if (MemVT.getStoreSize() != VT.getVectorElementType().getStoreSize())
break;
if (!IsProfitableToFold(Src, Node, Node) ||
!IsLegalToFold(Src, Node, Node, TM.getOptLevel()))
break;
SDValue VL;
if (Node->getOpcode() == ISD::SPLAT_VECTOR)
VL = CurDAG->getTargetConstant(RISCV::VLMaxSentinel, DL, XLenVT);
else
selectVLOp(Node->getOperand(1), VL);
unsigned Log2SEW = Log2_32(VT.getScalarSizeInBits());
SDValue SEW = CurDAG->getTargetConstant(Log2SEW, DL, XLenVT);
SDValue Operands[] = {Ld->getBasePtr(),
CurDAG->getRegister(RISCV::X0, XLenVT), VL, SEW,
Ld->getChain()};
RISCVII::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT);
const RISCV::VLEPseudo *P = RISCV::getVLEPseudo(
/*IsMasked*/ false, /*IsStrided*/ true, /*FF*/ false, Log2SEW,
static_cast<unsigned>(LMUL));
MachineSDNode *Load =
CurDAG->getMachineNode(P->Pseudo, DL, Node->getVTList(), Operands);
if (auto *MemOp = dyn_cast<MemSDNode>(Node))
CurDAG->setNodeMemRefs(Load, {MemOp->getMemOperand()});
ReplaceNode(Node, Load);
return;
}
}
// Select the default instruction.
SelectCode(Node);
}