void Verifier::visitIntrinsicCall()

in llvm/lib/IR/Verifier.cpp [4661:5437]


void Verifier::visitIntrinsicCall(Intrinsic::ID ID, CallBase &Call) {
  Function *IF = Call.getCalledFunction();
  Assert(IF->isDeclaration(), "Intrinsic functions should never be defined!",
         IF);

  // Verify that the intrinsic prototype lines up with what the .td files
  // describe.
  FunctionType *IFTy = IF->getFunctionType();
  bool IsVarArg = IFTy->isVarArg();

  SmallVector<Intrinsic::IITDescriptor, 8> Table;
  getIntrinsicInfoTableEntries(ID, Table);
  ArrayRef<Intrinsic::IITDescriptor> TableRef = Table;

  // Walk the descriptors to extract overloaded types.
  SmallVector<Type *, 4> ArgTys;
  Intrinsic::MatchIntrinsicTypesResult Res =
      Intrinsic::matchIntrinsicSignature(IFTy, TableRef, ArgTys);
  Assert(Res != Intrinsic::MatchIntrinsicTypes_NoMatchRet,
         "Intrinsic has incorrect return type!", IF);
  Assert(Res != Intrinsic::MatchIntrinsicTypes_NoMatchArg,
         "Intrinsic has incorrect argument type!", IF);

  // Verify that the intrinsic call matches the vararg property.
  if (IsVarArg)
    Assert(!Intrinsic::matchIntrinsicVarArg(IsVarArg, TableRef),
           "Intrinsic was not defined with variable arguments!", IF);
  else
    Assert(!Intrinsic::matchIntrinsicVarArg(IsVarArg, TableRef),
           "Callsite was not defined with variable arguments!", IF);

  // All descriptors should be absorbed by now.
  Assert(TableRef.empty(), "Intrinsic has too few arguments!", IF);

  // Now that we have the intrinsic ID and the actual argument types (and we
  // know they are legal for the intrinsic!) get the intrinsic name through the
  // usual means.  This allows us to verify the mangling of argument types into
  // the name.
  const std::string ExpectedName =
      Intrinsic::getName(ID, ArgTys, IF->getParent(), IFTy);
  Assert(ExpectedName == IF->getName(),
         "Intrinsic name not mangled correctly for type arguments! "
         "Should be: " +
             ExpectedName,
         IF);
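  // For example (illustrative), a memcpy over i8* pointers with an i64 length
  // mangles to "llvm.memcpy.p0i8.p0i8.i64"; any other spelling of the
  // overloaded type suffixes is rejected here.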

  // If the intrinsic takes MDNode arguments, verify that they are either
  // global or local to *this* function.
  for (Value *V : Call.args()) {
    if (auto *MD = dyn_cast<MetadataAsValue>(V))
      visitMetadataAsValue(*MD, Call.getCaller());
    if (auto *Const = dyn_cast<Constant>(V))
      Assert(!Const->getType()->isX86_AMXTy(),
             "const x86_amx is not allowed in argument!");
  }

  switch (ID) {
  default:
    break;
  case Intrinsic::assume: {
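    // assume operand bundles encode attribute-like facts, e.g. (illustrative)
    //   call void @llvm.assume(i1 true) ["align"(i8* %p, i64 16)]
    // where the tag names an attribute and the inputs are its arguments.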
    for (auto &Elem : Call.bundle_op_infos()) {
      Assert(Elem.Tag->getKey() == "ignore" ||
                 Attribute::isExistingAttribute(Elem.Tag->getKey()),
             "tags must be valid attribute names", Call);
      Attribute::AttrKind Kind =
          Attribute::getAttrKindFromName(Elem.Tag->getKey());
      unsigned ArgCount = Elem.End - Elem.Begin;
      if (Kind == Attribute::Alignment) {
        Assert(ArgCount <= 3 && ArgCount >= 2,
               "alignment assumptions should have 2 or 3 arguments", Call);
        Assert(Call.getOperand(Elem.Begin)->getType()->isPointerTy(),
               "first argument should be a pointer", Call);
        Assert(Call.getOperand(Elem.Begin + 1)->getType()->isIntegerTy(),
               "second argument should be an integer", Call);
        if (ArgCount == 3)
          Assert(Call.getOperand(Elem.Begin + 2)->getType()->isIntegerTy(),
                 "third argument should be an integer if present", Call);
        return;
      }
      Assert(ArgCount <= 2, "too many arguments", Call);
      if (Kind == Attribute::None)
        break;
      if (Attribute::isIntAttrKind(Kind)) {
        Assert(ArgCount == 2, "this attribute should have 2 arguments", Call);
        Assert(isa<ConstantInt>(Call.getOperand(Elem.Begin + 1)),
               "the second argument should be a constant integral value", Call);
      } else if (Attribute::canUseAsParamAttr(Kind)) {
        Assert(ArgCount == 1, "this attribute should have one argument",
               Call);
      } else if (Attribute::canUseAsFnAttr(Kind)) {
        Assert(ArgCount == 0, "this attribute takes no arguments", Call);
      }
    }
    break;
  }
  case Intrinsic::coro_id: {
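    // Operand 3 of llvm.coro.id is its coroutine-info argument: per the
    // checks below it is either null or a pointer to an initialized constant
    // struct or array (a hedged summary; see the coroutine docs for details).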
    auto *InfoArg = Call.getArgOperand(3)->stripPointerCasts();
    if (isa<ConstantPointerNull>(InfoArg))
      break;
    auto *GV = dyn_cast<GlobalVariable>(InfoArg);
    Assert(GV && GV->isConstant() && GV->hasDefinitiveInitializer(),
           "info argument of llvm.coro.id must refer to an initialized "
           "constant");
    Constant *Init = GV->getInitializer();
    Assert(isa<ConstantStruct>(Init) || isa<ConstantArray>(Init),
           "info argument of llvm.coro.id must refer to either a struct or "
           "an array");
    break;
  }
#define INSTRUCTION(NAME, NARGS, ROUND_MODE, INTRINSIC)                        \
  case Intrinsic::INTRINSIC:
#include "llvm/IR/ConstrainedOps.def"
    visitConstrainedFPIntrinsic(cast<ConstrainedFPIntrinsic>(Call));
    break;
  case Intrinsic::dbg_declare: // llvm.dbg.declare
    Assert(isa<MetadataAsValue>(Call.getArgOperand(0)),
           "invalid llvm.dbg.declare intrinsic call 1", Call);
    visitDbgIntrinsic("declare", cast<DbgVariableIntrinsic>(Call));
    break;
  case Intrinsic::dbg_addr: // llvm.dbg.addr
    visitDbgIntrinsic("addr", cast<DbgVariableIntrinsic>(Call));
    break;
  case Intrinsic::dbg_value: // llvm.dbg.value
    visitDbgIntrinsic("value", cast<DbgVariableIntrinsic>(Call));
    break;
  case Intrinsic::dbg_label: // llvm.dbg.label
    visitDbgLabelIntrinsic("label", cast<DbgLabelInst>(Call));
    break;
  case Intrinsic::memcpy:
  case Intrinsic::memcpy_inline:
  case Intrinsic::memmove:
  case Intrinsic::memset: {
    const auto *MI = cast<MemIntrinsic>(&Call);
    auto IsValidAlignment = [&](unsigned Alignment) -> bool {
      return Alignment == 0 || isPowerOf2_32(Alignment);
    };
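    // Illustrative: a destination alignment of 4 passes, a non-power-of-two
    // alignment such as 3 would be rejected, and 0 means no stated alignment.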
    Assert(IsValidAlignment(MI->getDestAlignment()),
           "alignment of arg 0 of memory intrinsic must be 0 or a power of 2",
           Call);
    if (const auto *MTI = dyn_cast<MemTransferInst>(MI)) {
      Assert(IsValidAlignment(MTI->getSourceAlignment()),
             "alignment of arg 1 of memory intrinsic must be 0 or a power of 2",
             Call);
    }

    break;
  }
  case Intrinsic::memcpy_element_unordered_atomic:
  case Intrinsic::memmove_element_unordered_atomic:
  case Intrinsic::memset_element_unordered_atomic: {
    const auto *AMI = cast<AtomicMemIntrinsic>(&Call);

    ConstantInt *ElementSizeCI =
        cast<ConstantInt>(AMI->getRawElementSizeInBytes());
    const APInt &ElementSizeVal = ElementSizeCI->getValue();
    Assert(ElementSizeVal.isPowerOf2(),
           "element size of the element-wise atomic memory intrinsic "
           "must be a power of 2",
           Call);

    auto IsValidAlignment = [&](uint64_t Alignment) {
      return isPowerOf2_64(Alignment) && ElementSizeVal.ule(Alignment);
    };
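    // So for a 4-byte element size, each pointer operand must carry a
    // power-of-two alignment of at least 4 (illustrative).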
    uint64_t DstAlignment = AMI->getDestAlignment();
    Assert(IsValidAlignment(DstAlignment),
           "incorrect alignment of the destination argument", Call);
    if (const auto *AMT = dyn_cast<AtomicMemTransferInst>(AMI)) {
      uint64_t SrcAlignment = AMT->getSourceAlignment();
      Assert(IsValidAlignment(SrcAlignment),
             "incorrect alignment of the source argument", Call);
    }
    break;
  }
  case Intrinsic::call_preallocated_setup: {
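    // Illustrative shape of a preallocated call sequence:
    //   %t = call token @llvm.call.preallocated.setup(i32 1)
    //   %a = call i8* @llvm.call.preallocated.arg(token %t, i32 0)
    //            preallocated(i32)
    //   call void @foo(i32* preallocated(i32) %a) ["preallocated"(token %t)]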
    auto *NumArgs = dyn_cast<ConstantInt>(Call.getArgOperand(0));
    Assert(NumArgs != nullptr,
           "llvm.call.preallocated.setup argument must be a constant");
    bool FoundCall = false;
    for (User *U : Call.users()) {
      auto *UseCall = dyn_cast<CallBase>(U);
      Assert(UseCall != nullptr,
             "Uses of llvm.call.preallocated.setup must be calls");
      const Function *Fn = UseCall->getCalledFunction();
      if (Fn && Fn->getIntrinsicID() == Intrinsic::call_preallocated_arg) {
        auto *AllocArgIndex = dyn_cast<ConstantInt>(UseCall->getArgOperand(1));
        Assert(AllocArgIndex != nullptr,
               "llvm.call.preallocated.alloc arg index must be a constant");
        auto AllocArgIndexInt = AllocArgIndex->getValue();
        Assert(AllocArgIndexInt.sge(0) &&
                   AllocArgIndexInt.slt(NumArgs->getValue()),
               "llvm.call.preallocated.alloc arg index must be between 0 and "
               "corresponding "
               "llvm.call.preallocated.setup's argument count");
      } else if (Fn && Fn->getIntrinsicID() ==
                           Intrinsic::call_preallocated_teardown) {
        // nothing to do
      } else {
        Assert(!FoundCall, "Can have at most one call corresponding to a "
                           "llvm.call.preallocated.setup");
        FoundCall = true;
        size_t NumPreallocatedArgs = 0;
        for (unsigned i = 0; i < UseCall->arg_size(); i++) {
          if (UseCall->paramHasAttr(i, Attribute::Preallocated)) {
            ++NumPreallocatedArgs;
          }
        }
        Assert(NumPreallocatedArgs != 0,
               "cannot use preallocated intrinsics on a call without "
               "preallocated arguments");
        Assert(NumArgs->equalsInt(NumPreallocatedArgs),
               "llvm.call.preallocated.setup arg size must be equal to number "
               "of preallocated arguments "
               "at call site",
               Call, *UseCall);
        // getOperandBundle() cannot be called if more than one operand bundle
        // of the same kind exists. That case is diagnosed elsewhere, so skip
        // the check here if we see more than one.
        if (UseCall->countOperandBundlesOfType(LLVMContext::OB_preallocated) >
            1) {
          return;
        }
        auto PreallocatedBundle =
            UseCall->getOperandBundle(LLVMContext::OB_preallocated);
        Assert(PreallocatedBundle,
               "Use of llvm.call.preallocated.setup outside intrinsics "
               "must be in \"preallocated\" operand bundle");
        Assert(PreallocatedBundle->Inputs.front().get() == &Call,
               "preallocated bundle must have token from corresponding "
               "llvm.call.preallocated.setup");
      }
    }
    break;
  }
  case Intrinsic::call_preallocated_arg: {
    auto *Token = dyn_cast<CallBase>(Call.getArgOperand(0));
    Assert(Token && Token->getCalledFunction()->getIntrinsicID() ==
                        Intrinsic::call_preallocated_setup,
           "llvm.call.preallocated.arg token argument must be a "
           "llvm.call.preallocated.setup");
    Assert(Call.hasFnAttr(Attribute::Preallocated),
           "llvm.call.preallocated.arg must be called with a \"preallocated\" "
           "call site attribute");
    break;
  }
  case Intrinsic::call_preallocated_teardown: {
    auto *Token = dyn_cast<CallBase>(Call.getArgOperand(0));
    Assert(Token && Token->getCalledFunction()->getIntrinsicID() ==
                        Intrinsic::call_preallocated_setup,
           "llvm.call.preallocated.teardown token argument must be a "
           "llvm.call.preallocated.setup");
    break;
  }
  case Intrinsic::gcroot:
  case Intrinsic::gcwrite:
  case Intrinsic::gcread:
    if (ID == Intrinsic::gcroot) {
      AllocaInst *AI =
          dyn_cast<AllocaInst>(Call.getArgOperand(0)->stripPointerCasts());
      Assert(AI, "llvm.gcroot parameter #1 must be an alloca.", Call);
      Assert(isa<Constant>(Call.getArgOperand(1)),
             "llvm.gcroot parameter #2 must be a constant.", Call);
      if (!AI->getAllocatedType()->isPointerTy()) {
        Assert(!isa<ConstantPointerNull>(Call.getArgOperand(1)),
               "llvm.gcroot parameter #1 must either be a pointer alloca, "
               "or argument #2 must be a non-null constant.",
               Call);
      }
    }

    Assert(Call.getParent()->getParent()->hasGC(),
           "Enclosing function does not use GC.", Call);
    break;
  case Intrinsic::init_trampoline:
    Assert(isa<Function>(Call.getArgOperand(1)->stripPointerCasts()),
           "llvm.init_trampoline parameter #2 must resolve to a function.",
           Call);
    break;
  case Intrinsic::prefetch:
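    // Operand 1 is the rw specifier (0 = read, 1 = write) and operand 2 the
    // locality hint (0 through 3), hence the bounds below.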
    Assert(cast<ConstantInt>(Call.getArgOperand(1))->getZExtValue() < 2 &&
           cast<ConstantInt>(Call.getArgOperand(2))->getZExtValue() < 4,
           "invalid arguments to llvm.prefetch", Call);
    break;
  case Intrinsic::stackprotector:
    Assert(isa<AllocaInst>(Call.getArgOperand(1)->stripPointerCasts()),
           "llvm.stackprotector parameter #2 must resolve to an alloca.", Call);
    break;
  case Intrinsic::localescape: {
    BasicBlock *BB = Call.getParent();
    Assert(BB == &BB->getParent()->front(),
           "llvm.localescape used outside of entry block", Call);
    Assert(!SawFrameEscape,
           "multiple calls to llvm.localescape in one function", Call);
    for (Value *Arg : Call.args()) {
      if (isa<ConstantPointerNull>(Arg))
        continue; // Null values are allowed as placeholders.
      auto *AI = dyn_cast<AllocaInst>(Arg->stripPointerCasts());
      Assert(AI && AI->isStaticAlloca(),
             "llvm.localescape only accepts static allocas", Call);
    }
    FrameEscapeInfo[BB->getParent()].first = Call.arg_size();
    SawFrameEscape = true;
    break;
  }
  case Intrinsic::localrecover: {
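    // llvm.localrecover(fn, fp, idx) recovers the idx'th value escaped via
    // llvm.localescape in `fn`; remember the largest index seen so it can
    // later be checked against that function's escape count.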
    Value *FnArg = Call.getArgOperand(0)->stripPointerCasts();
    Function *Fn = dyn_cast<Function>(FnArg);
    Assert(Fn && !Fn->isDeclaration(),
           "llvm.localrecover first "
           "argument must be a function defined in this module",
           Call);
    auto *IdxArg = cast<ConstantInt>(Call.getArgOperand(2));
    auto &Entry = FrameEscapeInfo[Fn];
    Entry.second = unsigned(
        std::max(uint64_t(Entry.second), IdxArg->getLimitedValue(~0U) + 1));
    break;
  }

  case Intrinsic::experimental_gc_statepoint:
    if (auto *CI = dyn_cast<CallInst>(&Call))
      Assert(!CI->isInlineAsm(),
             "gc.statepoint support for inline assembly unimplemented", CI);
    Assert(Call.getParent()->getParent()->hasGC(),
           "Enclosing function does not use GC.", Call);

    verifyStatepoint(Call);
    break;
  case Intrinsic::experimental_gc_result: {
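    // Illustrative pairing: the operand must be the statepoint's token, e.g.
    //   %tok = call token @llvm.experimental.gc.statepoint(...)
    //   %res = call i32 @llvm.experimental.gc.result(token %tok)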
    Assert(Call.getParent()->getParent()->hasGC(),
           "Enclosing function does not use GC.", Call);
    // Are we tied to a statepoint properly?
    const auto *StatepointCall = dyn_cast<CallBase>(Call.getArgOperand(0));
    const Function *StatepointFn =
        StatepointCall ? StatepointCall->getCalledFunction() : nullptr;
    Assert(StatepointFn && StatepointFn->isDeclaration() &&
               StatepointFn->getIntrinsicID() ==
                   Intrinsic::experimental_gc_statepoint,
           "gc.result operand #1 must be from a statepoint", Call,
           Call.getArgOperand(0));

    // Assert that result type matches wrapped callee.
    const Value *Target = StatepointCall->getArgOperand(2);
    auto *PT = cast<PointerType>(Target->getType());
    auto *TargetFuncType = cast<FunctionType>(PT->getElementType());
    Assert(Call.getType() == TargetFuncType->getReturnType(),
           "gc.result result type does not match wrapped callee", Call);
    break;
  }
  case Intrinsic::experimental_gc_relocate: {
    Assert(Call.arg_size() == 3, "wrong number of arguments", Call);

    Assert(isa<PointerType>(Call.getType()->getScalarType()),
           "gc.relocate must return a pointer or a vector of pointers", Call);

    // Check that this relocate is correctly tied to the statepoint

    // This is the case for a relocate on the unwinding path of an invoke
    // statepoint.
    if (LandingPadInst *LandingPad =
            dyn_cast<LandingPadInst>(Call.getArgOperand(0))) {

      const BasicBlock *InvokeBB =
          LandingPad->getParent()->getUniquePredecessor();

      // A landingpad relocate should have a single predecessor whose
      // terminator is an invoke statepoint.
      Assert(InvokeBB, "safepoints should have unique landingpads",
             LandingPad->getParent());
      Assert(InvokeBB->getTerminator(), "safepoint block should be well formed",
             InvokeBB);
      Assert(isa<GCStatepointInst>(InvokeBB->getTerminator()),
             "gc relocate should be linked to a statepoint", InvokeBB);
    } else {
      // In all other cases relocate should be tied to the statepoint directly.
      // This covers relocates on a normal return path of invoke statepoint and
      // relocates of a call statepoint.
      auto Token = Call.getArgOperand(0);
      Assert(isa<GCStatepointInst>(Token),
             "gc relocate is incorrectly tied to the statepoint", Call, Token);
    }

    // Verify rest of the relocate arguments.
    const CallBase &StatepointCall =
      *cast<GCRelocateInst>(Call).getStatepoint();

    // Both the base and derived must be piped through the safepoint.
    Value *Base = Call.getArgOperand(1);
    Assert(isa<ConstantInt>(Base),
           "gc.relocate operand #2 must be integer offset", Call);

    Value *Derived = Call.getArgOperand(2);
    Assert(isa<ConstantInt>(Derived),
           "gc.relocate operand #3 must be integer offset", Call);

    const uint64_t BaseIndex = cast<ConstantInt>(Base)->getZExtValue();
    const uint64_t DerivedIndex = cast<ConstantInt>(Derived)->getZExtValue();
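    // When live values are carried in the "gc-live" operand bundle, these
    // indices select its entries, e.g. (illustrative) 0 and 1 into
    // ["gc-live"(i8* %base, i8* %derived)].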

    // Check the bounds
    if (auto Opt = StatepointCall.getOperandBundle(LLVMContext::OB_gc_live)) {
      Assert(BaseIndex < Opt->Inputs.size(),
             "gc.relocate: statepoint base index out of bounds", Call);
      Assert(DerivedIndex < Opt->Inputs.size(),
             "gc.relocate: statepoint derived index out of bounds", Call);
    }

    // Relocated value must be either a pointer type or vector-of-pointer type,
    // but gc_relocate does not need to return the same pointer type as the
    // relocated pointer. It can be cast to the correct type later if desired.
    // However, they must have the same address space and 'vectorness'.
    GCRelocateInst &Relocate = cast<GCRelocateInst>(Call);
    Assert(Relocate.getDerivedPtr()->getType()->isPtrOrPtrVectorTy(),
           "gc.relocate: relocated value must be a gc pointer", Call);

    auto ResultType = Call.getType();
    auto DerivedType = Relocate.getDerivedPtr()->getType();
    Assert(ResultType->isVectorTy() == DerivedType->isVectorTy(),
           "gc.relocate: vector relocates to vector and pointer to pointer",
           Call);
    Assert(
        ResultType->getPointerAddressSpace() ==
            DerivedType->getPointerAddressSpace(),
        "gc.relocate: relocating a pointer shouldn't change its address space",
        Call);
    break;
  }
  case Intrinsic::eh_exceptioncode:
  case Intrinsic::eh_exceptionpointer: {
    Assert(isa<CatchPadInst>(Call.getArgOperand(0)),
           "eh.exceptionpointer argument must be a catchpad", Call);
    break;
  }
  case Intrinsic::get_active_lane_mask: {
    Assert(Call.getType()->isVectorTy(), "get_active_lane_mask: must return a "
           "vector", Call);
    auto *ElemTy = Call.getType()->getScalarType();
    Assert(ElemTy->isIntegerTy(1), "get_active_lane_mask: element type is not "
           "i1", Call);
    break;
  }
  case Intrinsic::masked_load: {
    Assert(Call.getType()->isVectorTy(), "masked_load: must return a vector",
           Call);

    Value *Ptr = Call.getArgOperand(0);
    ConstantInt *Alignment = cast<ConstantInt>(Call.getArgOperand(1));
    Value *Mask = Call.getArgOperand(2);
    Value *PassThru = Call.getArgOperand(3);
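    // Illustrative signature (typed-pointer mangling):
    //   <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %p, i32 4,
    //                                             <4 x i1> %m, <4 x i32> %pt)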
    Assert(Mask->getType()->isVectorTy(), "masked_load: mask must be vector",
           Call);
    Assert(Alignment->getValue().isPowerOf2(),
           "masked_load: alignment must be a power of 2", Call);

    PointerType *PtrTy = cast<PointerType>(Ptr->getType());
    Assert(PtrTy->isOpaqueOrPointeeTypeMatches(Call.getType()),
           "masked_load: return must match pointer type", Call);
    Assert(PassThru->getType() == Call.getType(),
           "masked_load: pass through and return type must match", Call);
    Assert(cast<VectorType>(Mask->getType())->getElementCount() ==
               cast<VectorType>(Call.getType())->getElementCount(),
           "masked_load: vector mask must be same length as return", Call);
    break;
  }
  case Intrinsic::masked_store: {
    Value *Val = Call.getArgOperand(0);
    Value *Ptr = Call.getArgOperand(1);
    ConstantInt *Alignment = cast<ConstantInt>(Call.getArgOperand(2));
    Value *Mask = Call.getArgOperand(3);
    Assert(Mask->getType()->isVectorTy(), "masked_store: mask must be vector",
           Call);
    Assert(Alignment->getValue().isPowerOf2(),
           "masked_store: alignment must be a power of 2", Call);

    PointerType *PtrTy = cast<PointerType>(Ptr->getType());
    Assert(PtrTy->isOpaqueOrPointeeTypeMatches(Val->getType()),
           "masked_store: storee must match pointer type", Call);
    Assert(cast<VectorType>(Mask->getType())->getElementCount() ==
               cast<VectorType>(Val->getType())->getElementCount(),
           "masked_store: vector mask must be same length as value", Call);
    break;
  }

  case Intrinsic::masked_gather: {
    const APInt &Alignment =
        cast<ConstantInt>(Call.getArgOperand(1))->getValue();
    Assert(Alignment.isZero() || Alignment.isPowerOf2(),
           "masked_gather: alignment must be 0 or a power of 2", Call);
    break;
  }
  case Intrinsic::masked_scatter: {
    const APInt &Alignment =
        cast<ConstantInt>(Call.getArgOperand(2))->getValue();
    Assert(Alignment.isZero() || Alignment.isPowerOf2(),
           "masked_scatter: alignment must be 0 or a power of 2", Call);
    break;
  }

  case Intrinsic::experimental_guard: {
    Assert(isa<CallInst>(Call), "experimental_guard cannot be invoked", Call);
    Assert(Call.countOperandBundlesOfType(LLVMContext::OB_deopt) == 1,
           "experimental_guard must have exactly one "
           "\"deopt\" operand bundle");
    break;
  }

  case Intrinsic::experimental_deoptimize: {
    Assert(isa<CallInst>(Call), "experimental_deoptimize cannot be invoked",
           Call);
    Assert(Call.countOperandBundlesOfType(LLVMContext::OB_deopt) == 1,
           "experimental_deoptimize must have exactly one "
           "\"deopt\" operand bundle");
    Assert(Call.getType() == Call.getFunction()->getReturnType(),
           "experimental_deoptimize return type must match caller return type");

    if (isa<CallInst>(Call)) {
      auto *RI = dyn_cast<ReturnInst>(Call.getNextNode());
      Assert(RI,
             "calls to experimental_deoptimize must be followed by a return");

      if (!Call.getType()->isVoidTy() && RI)
        Assert(RI->getReturnValue() == &Call,
               "calls to experimental_deoptimize must be followed by a return "
               "of the value computed by experimental_deoptimize");
    }

    break;
  }
  case Intrinsic::vector_reduce_and:
  case Intrinsic::vector_reduce_or:
  case Intrinsic::vector_reduce_xor:
  case Intrinsic::vector_reduce_add:
  case Intrinsic::vector_reduce_mul:
  case Intrinsic::vector_reduce_smax:
  case Intrinsic::vector_reduce_smin:
  case Intrinsic::vector_reduce_umax:
  case Intrinsic::vector_reduce_umin: {
    Type *ArgTy = Call.getArgOperand(0)->getType();
    Assert(ArgTy->isIntOrIntVectorTy() && ArgTy->isVectorTy(),
           "Intrinsic has incorrect argument type!");
    break;
  }
  case Intrinsic::vector_reduce_fmax:
  case Intrinsic::vector_reduce_fmin: {
    Type *ArgTy = Call.getArgOperand(0)->getType();
    Assert(ArgTy->isFPOrFPVectorTy() && ArgTy->isVectorTy(),
           "Intrinsic has incorrect argument type!");
    break;
  }
  case Intrinsic::vector_reduce_fadd:
  case Intrinsic::vector_reduce_fmul: {
    // Unlike the other reductions, the first argument is a start value. The
    // second argument is the vector to be reduced.
    Type *ArgTy = Call.getArgOperand(1)->getType();
    Assert(ArgTy->isFPOrFPVectorTy() && ArgTy->isVectorTy(),
           "Intrinsic has incorrect argument type!");
    break;
  }
  case Intrinsic::smul_fix:
  case Intrinsic::smul_fix_sat:
  case Intrinsic::umul_fix:
  case Intrinsic::umul_fix_sat:
  case Intrinsic::sdiv_fix:
  case Intrinsic::sdiv_fix_sat:
  case Intrinsic::udiv_fix:
  case Intrinsic::udiv_fix_sat: {
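    // These take two integer (or integer vector) operands and a constant
    // scale, e.g. (illustrative)
    //   call i32 @llvm.smul.fix.i32(i32 %a, i32 %b, i32 31)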
    Value *Op1 = Call.getArgOperand(0);
    Value *Op2 = Call.getArgOperand(1);
    Assert(Op1->getType()->isIntOrIntVectorTy(),
           "first operand of [us][mul|div]_fix[_sat] must be an int type or "
           "vector of ints");
    Assert(Op2->getType()->isIntOrIntVectorTy(),
           "second operand of [us][mul|div]_fix[_sat] must be an int type or "
           "vector of ints");

    auto *Op3 = cast<ConstantInt>(Call.getArgOperand(2));
    Assert(Op3->getType()->getBitWidth() <= 32,
           "third argument of [us][mul|div]_fix[_sat] must fit within 32 bits");

    if (ID == Intrinsic::smul_fix || ID == Intrinsic::smul_fix_sat ||
        ID == Intrinsic::sdiv_fix || ID == Intrinsic::sdiv_fix_sat) {
      Assert(
          Op3->getZExtValue() < Op1->getType()->getScalarSizeInBits(),
          "the scale of s[mul|div]_fix[_sat] must be less than the width of "
          "the operands");
    } else {
      Assert(Op3->getZExtValue() <= Op1->getType()->getScalarSizeInBits(),
             "the scale of u[mul|div]_fix[_sat] must be less than or equal "
             "to the width of the operands");
    }
    break;
  }
  case Intrinsic::lround:
  case Intrinsic::llround:
  case Intrinsic::lrint:
  case Intrinsic::llrint: {
    Type *ValTy = Call.getArgOperand(0)->getType();
    Type *ResultTy = Call.getType();
    Assert(!ValTy->isVectorTy() && !ResultTy->isVectorTy(),
           "Intrinsic does not support vectors", &Call);
    break;
  }
  case Intrinsic::bswap: {
    Type *Ty = Call.getType();
    unsigned Size = Ty->getScalarSizeInBits();
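    // bswap reverses whole bytes, so the scalar width must be a multiple of
    // 16 bits (i16, i32, i48, i64, ...).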
    Assert(Size % 16 == 0, "bswap width must be an even number of bytes",
           &Call);
    break;
  }
  case Intrinsic::invariant_start: {
    ConstantInt *InvariantSize = dyn_cast<ConstantInt>(Call.getArgOperand(0));
    Assert(InvariantSize &&
               (!InvariantSize->isNegative() || InvariantSize->isMinusOne()),
           "invariant_start parameter must be -1, 0 or a positive number",
           &Call);
    break;
  }
  case Intrinsic::matrix_multiply:
  case Intrinsic::matrix_transpose:
  case Intrinsic::matrix_column_major_load:
  case Intrinsic::matrix_column_major_store: {
    Function *IF = Call.getCalledFunction();
    ConstantInt *Stride = nullptr;
    ConstantInt *NumRows;
    ConstantInt *NumColumns;
    VectorType *ResultTy;
    Type *Op0ElemTy = nullptr;
    Type *Op1ElemTy = nullptr;
    switch (ID) {
    case Intrinsic::matrix_multiply:
      NumRows = cast<ConstantInt>(Call.getArgOperand(2));
      NumColumns = cast<ConstantInt>(Call.getArgOperand(4));
      ResultTy = cast<VectorType>(Call.getType());
      Op0ElemTy =
          cast<VectorType>(Call.getArgOperand(0)->getType())->getElementType();
      Op1ElemTy =
          cast<VectorType>(Call.getArgOperand(1)->getType())->getElementType();
      break;
    case Intrinsic::matrix_transpose:
      NumRows = cast<ConstantInt>(Call.getArgOperand(1));
      NumColumns = cast<ConstantInt>(Call.getArgOperand(2));
      ResultTy = cast<VectorType>(Call.getType());
      Op0ElemTy =
          cast<VectorType>(Call.getArgOperand(0)->getType())->getElementType();
      break;
    case Intrinsic::matrix_column_major_load: {
      Stride = dyn_cast<ConstantInt>(Call.getArgOperand(1));
      NumRows = cast<ConstantInt>(Call.getArgOperand(3));
      NumColumns = cast<ConstantInt>(Call.getArgOperand(4));
      ResultTy = cast<VectorType>(Call.getType());

      PointerType *Op0PtrTy =
          cast<PointerType>(Call.getArgOperand(0)->getType());
      if (!Op0PtrTy->isOpaque())
        Op0ElemTy = Op0PtrTy->getElementType();
      break;
    }
    case Intrinsic::matrix_column_major_store: {
      Stride = dyn_cast<ConstantInt>(Call.getArgOperand(2));
      NumRows = cast<ConstantInt>(Call.getArgOperand(4));
      NumColumns = cast<ConstantInt>(Call.getArgOperand(5));
      ResultTy = cast<VectorType>(Call.getArgOperand(0)->getType());
      Op0ElemTy =
          cast<VectorType>(Call.getArgOperand(0)->getType())->getElementType();

      PointerType *Op1PtrTy =
          cast<PointerType>(Call.getArgOperand(1)->getType());
      if (!Op1PtrTy->isOpaque())
        Op1ElemTy = Op1PtrTy->getElementType();
      break;
    }
    default:
      llvm_unreachable("unexpected intrinsic");
    }
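    // Matrix operands are flattened vectors in column-major order, so an
    // R x C result must have exactly R * C elements (illustrative: a 2 x 3
    // float matrix is <6 x float>).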

    Assert(ResultTy->getElementType()->isIntegerTy() ||
           ResultTy->getElementType()->isFloatingPointTy(),
           "Result type must be an integer or floating-point type!", IF);

    if (Op0ElemTy)
      Assert(ResultTy->getElementType() == Op0ElemTy,
             "Vector element type mismatch of the result and first operand "
             "vector!", IF);

    if (Op1ElemTy)
      Assert(ResultTy->getElementType() == Op1ElemTy,
             "Vector element type mismatch of the result and second operand "
             "vector!", IF);

    Assert(cast<FixedVectorType>(ResultTy)->getNumElements() ==
               NumRows->getZExtValue() * NumColumns->getZExtValue(),
           "Result of a matrix operation does not fit in the returned vector!");

    if (Stride)
      Assert(Stride->getZExtValue() >= NumRows->getZExtValue(),
             "Stride must be greater than or equal to the number of rows!",
             IF);

    break;
  }
  case Intrinsic::experimental_stepvector: {
    VectorType *VecTy = dyn_cast<VectorType>(Call.getType());
    Assert(VecTy && VecTy->getScalarType()->isIntegerTy() &&
               VecTy->getScalarSizeInBits() >= 8,
           "experimental_stepvector only supported for vectors of integers "
           "with a bitwidth of at least 8.",
           &Call);
    break;
  }
  case Intrinsic::experimental_vector_insert: {
    Value *Vec = Call.getArgOperand(0);
    Value *SubVec = Call.getArgOperand(1);
    Value *Idx = Call.getArgOperand(2);
    unsigned IdxN = cast<ConstantInt>(Idx)->getZExtValue();

    VectorType *VecTy = cast<VectorType>(Vec->getType());
    VectorType *SubVecTy = cast<VectorType>(SubVec->getType());

    ElementCount VecEC = VecTy->getElementCount();
    ElementCount SubVecEC = SubVecTy->getElementCount();
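    // Illustrative: inserting <2 x i32> into <4 x i32> is only valid at
    // indices 0 and 2; index 1 is not a multiple of the subvector length,
    // and index 4 would overrun.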
    Assert(VecTy->getElementType() == SubVecTy->getElementType(),
           "experimental_vector_insert parameters must have the same element "
           "type.",
           &Call);
    Assert(IdxN % SubVecEC.getKnownMinValue() == 0,
           "experimental_vector_insert index must be a constant multiple of "
           "the subvector's known minimum vector length.");

    // If this insertion is not the 'mixed' case where a fixed vector is
    // inserted into a scalable vector, ensure that the insertion of the
    // subvector does not overrun the parent vector.
    if (VecEC.isScalable() == SubVecEC.isScalable()) {
      Assert(
          IdxN < VecEC.getKnownMinValue() &&
              IdxN + SubVecEC.getKnownMinValue() <= VecEC.getKnownMinValue(),
          "subvector operand of experimental_vector_insert would overrun the "
          "vector being inserted into.");
    }
    break;
  }
  case Intrinsic::experimental_vector_extract: {
    Value *Vec = Call.getArgOperand(0);
    Value *Idx = Call.getArgOperand(1);
    unsigned IdxN = cast<ConstantInt>(Idx)->getZExtValue();

    VectorType *ResultTy = cast<VectorType>(Call.getType());
    VectorType *VecTy = cast<VectorType>(Vec->getType());

    ElementCount VecEC = VecTy->getElementCount();
    ElementCount ResultEC = ResultTy->getElementCount();
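    // Illustrative: extracting <2 x i32> from <4 x i32> is only valid at
    // indices 0 and 2.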

    Assert(ResultTy->getElementType() == VecTy->getElementType(),
           "experimental_vector_extract result must have the same element "
           "type as the input vector.",
           &Call);
    Assert(IdxN % ResultEC.getKnownMinValue() == 0,
           "experimental_vector_extract index must be a constant multiple of "
           "the result type's known minimum vector length.");

    // If this extraction is not the 'mixed' case where a fixed vector is
    // extracted from a scalable vector, ensure that the extraction does not
    // overrun the parent vector.
    if (VecEC.isScalable() == ResultEC.isScalable()) {
      Assert(IdxN < VecEC.getKnownMinValue() &&
                 IdxN + ResultEC.getKnownMinValue() <= VecEC.getKnownMinValue(),
             "experimental_vector_extract would overrun.");
    }
    break;
  }
  case Intrinsic::experimental_noalias_scope_decl: {
    NoAliasScopeDecls.push_back(cast<IntrinsicInst>(&Call));
    break;
  }
  case Intrinsic::preserve_array_access_index:
  case Intrinsic::preserve_struct_access_index: {
    Type *ElemTy = Call.getAttributes().getParamElementType(0);
    Assert(ElemTy,
           "Intrinsic requires elementtype attribute on first argument.",
           &Call);
    break;
  }
  }
}