bool AArch64AsmParser::MatchAndEmitInstruction()

in llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp [5457:5998]


bool AArch64AsmParser::MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
                                               OperandVector &Operands,
                                               MCStreamer &Out,
                                               uint64_t &ErrorInfo,
                                               bool MatchingInlineAsm) {
  assert(!Operands.empty() && "Unexpected empty operand list!");
  AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[0]);
  assert(Op.isToken() && "Leading operand should always be a mnemonic!");

  StringRef Tok = Op.getToken();
  unsigned NumOperands = Operands.size();

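  // The "lsl Rd, Rn, #shift" alias with an immediate shift is rewritten here
  // to the underlying UBFM. Worked example: "lsl w0, w1, #4" becomes
  // "ubfm w0, w1, #28, #27", since immr = (32 - 4) & 0x1f = 28 and
  // imms = 31 - 4 = 27.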
  if (NumOperands == 4 && Tok == "lsl") {
    AArch64Operand &Op2 = static_cast<AArch64Operand &>(*Operands[2]);
    AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
    if (Op2.isScalarReg() && Op3.isImm()) {
      const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Op3.getImm());
      if (Op3CE) {
        uint64_t Op3Val = Op3CE->getValue();
        uint64_t NewOp3Val = 0;
        uint64_t NewOp4Val = 0;
        if (AArch64MCRegisterClasses[AArch64::GPR32allRegClassID].contains(
                Op2.getReg())) {
          NewOp3Val = (32 - Op3Val) & 0x1f;
          NewOp4Val = 31 - Op3Val;
        } else {
          NewOp3Val = (64 - Op3Val) & 0x3f;
          NewOp4Val = 63 - Op3Val;
        }

        const MCExpr *NewOp3 = MCConstantExpr::create(NewOp3Val, getContext());
        const MCExpr *NewOp4 = MCConstantExpr::create(NewOp4Val, getContext());

        Operands[0] =
            AArch64Operand::CreateToken("ubfm", Op.getStartLoc(), getContext());
        Operands.push_back(AArch64Operand::CreateImm(
            NewOp4, Op3.getStartLoc(), Op3.getEndLoc(), getContext()));
        Operands[3] = AArch64Operand::CreateImm(NewOp3, Op3.getStartLoc(),
                                                Op3.getEndLoc(), getContext());
      }
    }
  } else if (NumOperands == 4 && Tok == "bfc") {
    // FIXME: Horrible hack to handle BFC->BFM alias.
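    // Worked example: "bfc w0, #3, #4" becomes "bfm w0, wzr, #29, #3", since
    // immr = (32 - 3) & 0x1f = 29 and imms = 4 - 1 = 3.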
    AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
    AArch64Operand LSBOp = static_cast<AArch64Operand &>(*Operands[2]);
    AArch64Operand WidthOp = static_cast<AArch64Operand &>(*Operands[3]);
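    // LSBOp and WidthOp are copied by value rather than bound by reference:
    // Operands[2] and Operands[3] are replaced below while these copies'
    // source locations are still needed to build the diagnostics and the new
    // immediate operands.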

    if (Op1.isScalarReg() && LSBOp.isImm() && WidthOp.isImm()) {
      const MCConstantExpr *LSBCE = dyn_cast<MCConstantExpr>(LSBOp.getImm());
      const MCConstantExpr *WidthCE = dyn_cast<MCConstantExpr>(WidthOp.getImm());

      if (LSBCE && WidthCE) {
        uint64_t LSB = LSBCE->getValue();
        uint64_t Width = WidthCE->getValue();

        uint64_t RegWidth = 0;
        if (AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
                Op1.getReg()))
          RegWidth = 64;
        else
          RegWidth = 32;

        if (LSB >= RegWidth)
          return Error(LSBOp.getStartLoc(),
                       "expected integer in range [0, 31]");
        if (Width < 1 || Width > RegWidth)
          return Error(WidthOp.getStartLoc(),
                       "expected integer in range [1, 32]");

        uint64_t ImmR = 0;
        if (RegWidth == 32)
          ImmR = (32 - LSB) & 0x1f;
        else
          ImmR = (64 - LSB) & 0x3f;

        uint64_t ImmS = Width - 1;

        if (ImmR != 0 && ImmS >= ImmR)
          return Error(WidthOp.getStartLoc(),
                       "requested insert overflows register");

        const MCExpr *ImmRExpr = MCConstantExpr::create(ImmR, getContext());
        const MCExpr *ImmSExpr = MCConstantExpr::create(ImmS, getContext());
        Operands[0] =
            AArch64Operand::CreateToken("bfm", Op.getStartLoc(), getContext());
        Operands[2] = AArch64Operand::CreateReg(
            RegWidth == 32 ? AArch64::WZR : AArch64::XZR, RegKind::Scalar,
            SMLoc(), SMLoc(), getContext());
        Operands[3] = AArch64Operand::CreateImm(
            ImmRExpr, LSBOp.getStartLoc(), LSBOp.getEndLoc(), getContext());
        Operands.emplace_back(
            AArch64Operand::CreateImm(ImmSExpr, WidthOp.getStartLoc(),
                                      WidthOp.getEndLoc(), getContext()));
      }
    }
  } else if (NumOperands == 5) {
    // FIXME: Horrible hack to handle the BFI->BFM, SBFIZ->SBFM, and
    // UBFIZ->UBFM aliases.
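    // Worked example: "bfi w0, w1, #4, #8" becomes "bfm w0, w1, #28, #7",
    // since immr = (32 - 4) & 0x1f = 28 and imms = 8 - 1 = 7.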
    if (Tok == "bfi" || Tok == "sbfiz" || Tok == "ubfiz") {
      AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
      AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
      AArch64Operand &Op4 = static_cast<AArch64Operand &>(*Operands[4]);

      if (Op1.isScalarReg() && Op3.isImm() && Op4.isImm()) {
        const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Op3.getImm());
        const MCConstantExpr *Op4CE = dyn_cast<MCConstantExpr>(Op4.getImm());

        if (Op3CE && Op4CE) {
          uint64_t Op3Val = Op3CE->getValue();
          uint64_t Op4Val = Op4CE->getValue();

          uint64_t RegWidth = 0;
          if (AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
                  Op1.getReg()))
            RegWidth = 64;
          else
            RegWidth = 32;

          if (Op3Val >= RegWidth)
            return Error(Op3.getStartLoc(),
                         "expected integer in range [0, 31]");
          if (Op4Val < 1 || Op4Val > RegWidth)
            return Error(Op4.getStartLoc(),
                         "expected integer in range [1, 32]");

          uint64_t NewOp3Val = 0;
          if (RegWidth == 32)
            NewOp3Val = (32 - Op3Val) & 0x1f;
          else
            NewOp3Val = (64 - Op3Val) & 0x3f;

          uint64_t NewOp4Val = Op4Val - 1;

          if (NewOp3Val != 0 && NewOp4Val >= NewOp3Val)
            return Error(Op4.getStartLoc(),
                         "requested insert overflows register");

          const MCExpr *NewOp3 =
              MCConstantExpr::create(NewOp3Val, getContext());
          const MCExpr *NewOp4 =
              MCConstantExpr::create(NewOp4Val, getContext());
          Operands[3] = AArch64Operand::CreateImm(
              NewOp3, Op3.getStartLoc(), Op3.getEndLoc(), getContext());
          Operands[4] = AArch64Operand::CreateImm(
              NewOp4, Op4.getStartLoc(), Op4.getEndLoc(), getContext());
          if (Tok == "bfi")
            Operands[0] = AArch64Operand::CreateToken("bfm", Op.getStartLoc(),
                                                      getContext());
          else if (Tok == "sbfiz")
            Operands[0] = AArch64Operand::CreateToken("sbfm", Op.getStartLoc(),
                                                      getContext());
          else if (Tok == "ubfiz")
            Operands[0] = AArch64Operand::CreateToken("ubfm", Op.getStartLoc(),
                                                      getContext());
          else
            llvm_unreachable("No valid mnemonic for alias?");
        }
      }

      // FIXME: Horrible hack to handle the BFXIL->BFM, SBFX->SBFM, and
      // UBFX->UBFM aliases. The NumOperands == 5 check is already satisfied
      // in this branch.
    } else if (Tok == "bfxil" || Tok == "sbfx" || Tok == "ubfx") {
      AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
      AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
      AArch64Operand &Op4 = static_cast<AArch64Operand &>(*Operands[4]);

      if (Op1.isScalarReg() && Op3.isImm() && Op4.isImm()) {
        const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Op3.getImm());
        const MCConstantExpr *Op4CE = dyn_cast<MCConstantExpr>(Op4.getImm());

        if (Op3CE && Op4CE) {
          uint64_t Op3Val = Op3CE->getValue();
          uint64_t Op4Val = Op4CE->getValue();

          uint64_t RegWidth = 0;
          if (AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
                  Op1.getReg()))
            RegWidth = 64;
          else
            RegWidth = 32;

          if (Op3Val >= RegWidth)
            return Error(Op3.getStartLoc(),
                         "expected integer in range [0, 31]");
          if (Op4Val < 1 || Op4Val > RegWidth)
            return Error(Op4.getStartLoc(),
                         "expected integer in range [1, 32]");

          uint64_t NewOp4Val = Op3Val + Op4Val - 1;

          if (NewOp4Val >= RegWidth || NewOp4Val < Op3Val)
            return Error(Op4.getStartLoc(),
                         "requested extract overflows register");

          const MCExpr *NewOp4 =
              MCConstantExpr::create(NewOp4Val, getContext());
          Operands[4] = AArch64Operand::CreateImm(
              NewOp4, Op4.getStartLoc(), Op4.getEndLoc(), getContext());
          if (Tok == "bfxil")
            Operands[0] = AArch64Operand::CreateToken("bfm", Op.getStartLoc(),
                                                      getContext());
          else if (Tok == "sbfx")
            Operands[0] = AArch64Operand::CreateToken("sbfm", Op.getStartLoc(),
                                                      getContext());
          else if (Tok == "ubfx")
            Operands[0] = AArch64Operand::CreateToken("ubfm", Op.getStartLoc(),
                                                      getContext());
          else
            llvm_unreachable("No valid mnemonic for alias?");
        }
      }
    }
  }

  // The Cyclone CPU and early successors didn't execute the zero-cycle zeroing
  // instruction for FP registers correctly in some rare circumstances. Convert
  // it to a safe instruction and warn (because silently changing someone's
  // assembly is rude).
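  // For example, "movi.2d v0, #0" is emitted as "movi.16b v0, #0"; both write
  // 128 bits of zeros, so only the spelling of the instruction changes.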
  if (getSTI().getFeatureBits()[AArch64::FeatureZCZeroingFPWorkaround] &&
      NumOperands == 4 && Tok == "movi") {
    AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
    AArch64Operand &Op2 = static_cast<AArch64Operand &>(*Operands[2]);
    AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
    if ((Op1.isToken() && Op2.isNeonVectorReg() && Op3.isImm()) ||
        (Op1.isNeonVectorReg() && Op2.isToken() && Op3.isImm())) {
      StringRef Suffix = Op1.isToken() ? Op1.getToken() : Op2.getToken();
      if (Suffix.lower() == ".2d" &&
          cast<MCConstantExpr>(Op3.getImm())->getValue() == 0) {
        Warning(IDLoc, "instruction movi.2d with immediate #0 may not function"
                " correctly on this CPU, converting to equivalent movi.16b");
        // Switch the suffix to .16b.
        unsigned Idx = Op1.isToken() ? 1 : 2;
        Operands[Idx] =
            AArch64Operand::CreateToken(".16b", IDLoc, getContext());
      }
    }
  }

  // FIXME: Horrible hack for sxtw and uxtw with Wn src and Xd dst operands.
  //        InstAlias can't quite handle this since the reg classes aren't
  //        subclasses.
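  // For example, in "sxtw x0, w1" the source w1 is rewritten to x1 below so
  // the operands match the GPR64 pattern; the encoded register number is
  // unchanged.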
  if (NumOperands == 3 && (Tok == "sxtw" || Tok == "uxtw")) {
    // The source register can be Wn here, but the matcher expects a
    // GPR64. Twiddle it here if necessary.
    AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[2]);
    if (Op.isScalarReg()) {
      unsigned Reg = getXRegFromWReg(Op.getReg());
      Operands[2] = AArch64Operand::CreateReg(Reg, RegKind::Scalar,
                                              Op.getStartLoc(), Op.getEndLoc(),
                                              getContext());
    }
  }
  // FIXME: Likewise for sxt[bh] with an Xd dst operand
  else if (NumOperands == 3 && (Tok == "sxtb" || Tok == "sxth")) {
    AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[1]);
    if (Op.isScalarReg() &&
        AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
            Op.getReg())) {
      // The source register can be Wn here, but the matcher expects a
      // GPR64. Twiddle it here if necessary.
      AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[2]);
      if (Op.isScalarReg()) {
        unsigned Reg = getXRegFromWReg(Op.getReg());
        Operands[2] = AArch64Operand::CreateReg(Reg, RegKind::Scalar,
                                                Op.getStartLoc(),
                                                Op.getEndLoc(), getContext());
      }
    }
  }
  // FIXME: Likewise for uxt[bh] with an Xd dst operand
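  // For example, "uxtb x0, w1" has its destination rewritten to w0 below so
  // it matches the UBFM-based GPR32 pattern.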
  else if (NumOperands == 3 && (Tok == "uxtb" || Tok == "uxth")) {
    AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[1]);
    if (Op.isScalarReg() &&
        AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
            Op.getReg())) {
      // The destination register can be Xn here, but the matcher expects a
      // GPR32. Twiddle it here if necessary.
      unsigned Reg = getWRegFromXReg(Op.getReg());
      Operands[1] = AArch64Operand::CreateReg(Reg, RegKind::Scalar,
                                              Op.getStartLoc(),
                                              Op.getEndLoc(), getContext());
    }
  }

  MCInst Inst;
  FeatureBitset MissingFeatures;
  // First try to match against the secondary set of tables containing the
  // short-form NEON instructions (e.g. "fadd.2s v0, v1, v2").
  unsigned MatchResult =
      MatchInstructionImpl(Operands, Inst, ErrorInfo, MissingFeatures,
                           MatchingInlineAsm, /*VariantID=*/1);

  // If that fails, try against the alternate table containing long-form NEON:
  // "fadd v0.2s, v1.2s, v2.2s"
  if (MatchResult != Match_Success) {
    // But first, save the short-form match result: we can use it in case the
    // long-form match also fails.
    auto ShortFormNEONErrorInfo = ErrorInfo;
    auto ShortFormNEONMatchResult = MatchResult;
    auto ShortFormNEONMissingFeatures = MissingFeatures;

    MatchResult =
        MatchInstructionImpl(Operands, Inst, ErrorInfo, MissingFeatures,
                             MatchingInlineAsm, /*VariantID=*/0);

    // If both matches failed and the long-form match failed on the mnemonic
    // suffix token operand, the short-form match failure is probably more
    // relevant: use it instead.
    if (MatchResult == Match_InvalidOperand && ErrorInfo == 1 &&
        Operands.size() > 1 && ((AArch64Operand &)*Operands[1]).isToken() &&
        ((AArch64Operand &)*Operands[1]).isTokenSuffix()) {
      MatchResult = ShortFormNEONMatchResult;
      ErrorInfo = ShortFormNEONErrorInfo;
      MissingFeatures = ShortFormNEONMissingFeatures;
    }
  }

  switch (MatchResult) {
  case Match_Success: {
    // Perform range checking and other semantic validations
    SmallVector<SMLoc, 8> OperandLocs;
    NumOperands = Operands.size();
    for (unsigned i = 1; i < NumOperands; ++i)
      OperandLocs.push_back(Operands[i]->getStartLoc());
    if (validateInstruction(Inst, IDLoc, OperandLocs))
      return true;

    Inst.setLoc(IDLoc);
    Out.emitInstruction(Inst, getSTI());
    return false;
  }
  case Match_MissingFeature: {
    assert(MissingFeatures.any() && "Unknown missing feature!");
    // Special case the error message for the very common case where only
    // a single subtarget feature is missing (e.g. NEON).
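    // For example, an AdvSIMD instruction assembled without NEON support
    // yields "instruction requires: neon".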
    std::string Msg = "instruction requires:";
    for (unsigned i = 0, e = MissingFeatures.size(); i != e; ++i) {
      if (MissingFeatures[i]) {
        Msg += " ";
        Msg += getSubtargetFeatureName(i);
      }
    }
    return Error(IDLoc, Msg);
  }
  case Match_MnemonicFail:
    return showMatchError(IDLoc, MatchResult, ErrorInfo, Operands);
  case Match_InvalidOperand: {
    SMLoc ErrorLoc = IDLoc;

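    // ErrorInfo holds the index of the offending operand, or ~0ULL when the
    // failure is not tied to a particular operand.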
    if (ErrorInfo != ~0ULL) {
      if (ErrorInfo >= Operands.size())
        return Error(IDLoc, "too few operands for instruction",
                     SMRange(IDLoc, getTok().getLoc()));

      ErrorLoc = ((AArch64Operand &)*Operands[ErrorInfo]).getStartLoc();
      if (ErrorLoc == SMLoc())
        ErrorLoc = IDLoc;
    }
    // If the match failed on a suffix token operand, tweak the diagnostic
    // accordingly. Guard against an unset ErrorInfo before indexing Operands.
    if (ErrorInfo != ~0ULL &&
        ((AArch64Operand &)*Operands[ErrorInfo]).isToken() &&
        ((AArch64Operand &)*Operands[ErrorInfo]).isTokenSuffix())
      MatchResult = Match_InvalidSuffix;

    return showMatchError(ErrorLoc, MatchResult, ErrorInfo, Operands);
  }
  case Match_InvalidTiedOperand:
  case Match_InvalidMemoryIndexed1:
  case Match_InvalidMemoryIndexed2:
  case Match_InvalidMemoryIndexed4:
  case Match_InvalidMemoryIndexed8:
  case Match_InvalidMemoryIndexed16:
  case Match_InvalidCondCode:
  case Match_AddSubRegExtendSmall:
  case Match_AddSubRegExtendLarge:
  case Match_AddSubSecondSource:
  case Match_LogicalSecondSource:
  case Match_AddSubRegShift32:
  case Match_AddSubRegShift64:
  case Match_InvalidMovImm32Shift:
  case Match_InvalidMovImm64Shift:
  case Match_InvalidFPImm:
  case Match_InvalidMemoryWExtend8:
  case Match_InvalidMemoryWExtend16:
  case Match_InvalidMemoryWExtend32:
  case Match_InvalidMemoryWExtend64:
  case Match_InvalidMemoryWExtend128:
  case Match_InvalidMemoryXExtend8:
  case Match_InvalidMemoryXExtend16:
  case Match_InvalidMemoryXExtend32:
  case Match_InvalidMemoryXExtend64:
  case Match_InvalidMemoryXExtend128:
  case Match_InvalidMemoryIndexed1SImm4:
  case Match_InvalidMemoryIndexed2SImm4:
  case Match_InvalidMemoryIndexed3SImm4:
  case Match_InvalidMemoryIndexed4SImm4:
  case Match_InvalidMemoryIndexed1SImm6:
  case Match_InvalidMemoryIndexed16SImm4:
  case Match_InvalidMemoryIndexed32SImm4:
  case Match_InvalidMemoryIndexed4SImm7:
  case Match_InvalidMemoryIndexed8SImm7:
  case Match_InvalidMemoryIndexed16SImm7:
  case Match_InvalidMemoryIndexed8UImm5:
  case Match_InvalidMemoryIndexed4UImm5:
  case Match_InvalidMemoryIndexed2UImm5:
  case Match_InvalidMemoryIndexed1UImm6:
  case Match_InvalidMemoryIndexed2UImm6:
  case Match_InvalidMemoryIndexed4UImm6:
  case Match_InvalidMemoryIndexed8UImm6:
  case Match_InvalidMemoryIndexed16UImm6:
  case Match_InvalidMemoryIndexedSImm6:
  case Match_InvalidMemoryIndexedSImm5:
  case Match_InvalidMemoryIndexedSImm8:
  case Match_InvalidMemoryIndexedSImm9:
  case Match_InvalidMemoryIndexed16SImm9:
  case Match_InvalidMemoryIndexed8SImm10:
  case Match_InvalidImm0_0:
  case Match_InvalidImm0_1:
  case Match_InvalidImm0_3:
  case Match_InvalidImm0_7:
  case Match_InvalidImm0_15:
  case Match_InvalidImm0_31:
  case Match_InvalidImm0_63:
  case Match_InvalidImm0_127:
  case Match_InvalidImm0_255:
  case Match_InvalidImm0_65535:
  case Match_InvalidImm1_8:
  case Match_InvalidImm1_16:
  case Match_InvalidImm1_32:
  case Match_InvalidImm1_64:
  case Match_InvalidSVEAddSubImm8:
  case Match_InvalidSVEAddSubImm16:
  case Match_InvalidSVEAddSubImm32:
  case Match_InvalidSVEAddSubImm64:
  case Match_InvalidSVECpyImm8:
  case Match_InvalidSVECpyImm16:
  case Match_InvalidSVECpyImm32:
  case Match_InvalidSVECpyImm64:
  case Match_InvalidIndexRange0_0:
  case Match_InvalidIndexRange1_1:
  case Match_InvalidIndexRange0_15:
  case Match_InvalidIndexRange0_7:
  case Match_InvalidIndexRange0_3:
  case Match_InvalidIndexRange0_1:
  case Match_InvalidSVEIndexRange0_63:
  case Match_InvalidSVEIndexRange0_31:
  case Match_InvalidSVEIndexRange0_15:
  case Match_InvalidSVEIndexRange0_7:
  case Match_InvalidSVEIndexRange0_3:
  case Match_InvalidLabel:
  case Match_InvalidComplexRotationEven:
  case Match_InvalidComplexRotationOdd:
  case Match_InvalidGPR64shifted8:
  case Match_InvalidGPR64shifted16:
  case Match_InvalidGPR64shifted32:
  case Match_InvalidGPR64shifted64:
  case Match_InvalidGPR64shifted128:
  case Match_InvalidGPR64NoXZRshifted8:
  case Match_InvalidGPR64NoXZRshifted16:
  case Match_InvalidGPR64NoXZRshifted32:
  case Match_InvalidGPR64NoXZRshifted64:
  case Match_InvalidGPR64NoXZRshifted128:
  case Match_InvalidZPR32UXTW8:
  case Match_InvalidZPR32UXTW16:
  case Match_InvalidZPR32UXTW32:
  case Match_InvalidZPR32UXTW64:
  case Match_InvalidZPR32SXTW8:
  case Match_InvalidZPR32SXTW16:
  case Match_InvalidZPR32SXTW32:
  case Match_InvalidZPR32SXTW64:
  case Match_InvalidZPR64UXTW8:
  case Match_InvalidZPR64SXTW8:
  case Match_InvalidZPR64UXTW16:
  case Match_InvalidZPR64SXTW16:
  case Match_InvalidZPR64UXTW32:
  case Match_InvalidZPR64SXTW32:
  case Match_InvalidZPR64UXTW64:
  case Match_InvalidZPR64SXTW64:
  case Match_InvalidZPR32LSL8:
  case Match_InvalidZPR32LSL16:
  case Match_InvalidZPR32LSL32:
  case Match_InvalidZPR32LSL64:
  case Match_InvalidZPR64LSL8:
  case Match_InvalidZPR64LSL16:
  case Match_InvalidZPR64LSL32:
  case Match_InvalidZPR64LSL64:
  case Match_InvalidZPR0:
  case Match_InvalidZPR8:
  case Match_InvalidZPR16:
  case Match_InvalidZPR32:
  case Match_InvalidZPR64:
  case Match_InvalidZPR128:
  case Match_InvalidZPR_3b8:
  case Match_InvalidZPR_3b16:
  case Match_InvalidZPR_3b32:
  case Match_InvalidZPR_4b16:
  case Match_InvalidZPR_4b32:
  case Match_InvalidZPR_4b64:
  case Match_InvalidSVEPredicateAnyReg:
  case Match_InvalidSVEPattern:
  case Match_InvalidSVEPredicateBReg:
  case Match_InvalidSVEPredicateHReg:
  case Match_InvalidSVEPredicateSReg:
  case Match_InvalidSVEPredicateDReg:
  case Match_InvalidSVEPredicate3bAnyReg:
  case Match_InvalidSVEExactFPImmOperandHalfOne:
  case Match_InvalidSVEExactFPImmOperandHalfTwo:
  case Match_InvalidSVEExactFPImmOperandZeroOne:
  case Match_InvalidMatrixTile32:
  case Match_InvalidMatrixTile64:
  case Match_InvalidMatrix:
  case Match_InvalidMatrixTileVectorH8:
  case Match_InvalidMatrixTileVectorH16:
  case Match_InvalidMatrixTileVectorH32:
  case Match_InvalidMatrixTileVectorH64:
  case Match_InvalidMatrixTileVectorH128:
  case Match_InvalidMatrixTileVectorV8:
  case Match_InvalidMatrixTileVectorV16:
  case Match_InvalidMatrixTileVectorV32:
  case Match_InvalidMatrixTileVectorV64:
  case Match_InvalidMatrixTileVectorV128:
  case Match_InvalidSVCR:
  case Match_InvalidMatrixIndexGPR32_12_15:
  case Match_MSR:
  case Match_MRS: {
    if (ErrorInfo >= Operands.size())
      return Error(IDLoc, "too few operands for instruction", SMRange(IDLoc, (*Operands.back()).getEndLoc()));
    // Any time we get here, there's nothing fancy to do. Just get the
    // operand SMLoc and display the diagnostic.
    SMLoc ErrorLoc = ((AArch64Operand &)*Operands[ErrorInfo]).getStartLoc();
    if (ErrorLoc == SMLoc())
      ErrorLoc = IDLoc;
    return showMatchError(ErrorLoc, MatchResult, ErrorInfo, Operands);
  }
  }

  llvm_unreachable("Implement any new match types added!");
}