NNAPIDelegateKernel::MappingFn NNAPIDelegateKernel::Map()

in tensorflow/tensorflow/lite/delegates/nnapi/nnapi_delegate.cc [944:2596]
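
Map() is a large dispatch table: for each TFLite builtin it validates the op version, the device's Android SDK level, and the node's tensor types, then returns either nullptr (the node stays on the TFLite CPU kernels) or a capture-free lambda that, when invoked while the NNAPI model is built, appends the op's extra operands via the builder and returns the matching ANeuralNetworksOperationType. The sketch below distills that shape into a self-contained program; every name in it is a hypothetical stand-in, not the real TFLite or NNAPI API:

#include <cstdio>

// Hypothetical stand-ins for the real TFLite/NNAPI types.
using OperationType = int;
constexpr OperationType kOpAdd = 0;
struct MappingArgs {};  // Would carry the context, node, and operand builder.
using MappingFn = OperationType (*)(const MappingArgs&);

// Validate first; hand back a mapping function only when delegation is safe.
MappingFn Map(int builtin_code, int version, int sdk) {
  if (builtin_code == /*kBuiltinAdd*/ 0 && version <= 2 && sdk >= 27) {
    // Capture-free lambdas convert to plain function pointers.
    return [](const MappingArgs&) -> OperationType {
      // The real lambda would add the fused-activation scalar operand here.
      return kOpAdd;
    };
  }
  return nullptr;  // Unsupported: the op falls back to the TFLite CPU kernel.
}

int main() {
  if (MappingFn fn = Map(/*builtin_code=*/0, /*version=*/2, /*sdk=*/28)) {
    std::printf("mapped to NNAPI op %d\n", fn(MappingArgs{}));
  }
}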


NNAPIDelegateKernel::MappingFn NNAPIDelegateKernel::Map(
    const TfLiteContext* context, int builtin_code, int version,
    int android_sdk_version, const TfLiteNode* node,
    bool is_accelerator_specified) {
  switch (builtin_code) {
    case kTfLiteBuiltinAdd:
      if (version <= 2) {
        if (!IsFloatOrQuant8Operator(context, node)) {
          return nullptr;
        }
        return [](const NNAPIOpMappingArgs& mapping_args)
                   -> ANeuralNetworksOperationType {
          auto builtin = reinterpret_cast<TfLiteAddParams*>(
              mapping_args.node->builtin_data);
          mapping_args.builder->AddScalarInt32Operand(builtin->activation);
          return ANEURALNETWORKS_ADD;
        };
      }
      break;
    case kTfLiteBuiltinArgMax:
    case kTfLiteBuiltinArgMin:
      if (version <= 2) {
        // Those operators were introduced in NNAPI 1.2.
        if (android_sdk_version < kMinSdkVersionForNNAPI12) {
          return nullptr;
        }
        // Only certain input types are supported.
        auto input_type = context->tensors[node->inputs->data[0]].type;
        if (input_type != kTfLiteFloat16 && input_type != kTfLiteFloat32 &&
            input_type != kTfLiteInt32 && input_type != kTfLiteUInt8 &&
            input_type != kTfLiteInt8) {
          return nullptr;
        }
        // NNAPI only supports an int32 axis. If the axis is an int64
        // constant, it can be converted to int32 as long as the value fits.
        const auto& axis_tensor = context->tensors[node->inputs->data[1]];
        if (axis_tensor.type == kTfLiteInt64) {
          if (axis_tensor.allocation_type != kTfLiteMmapRo ||
              *axis_tensor.data.i64 > std::numeric_limits<int32_t>::max() ||
              *axis_tensor.data.i64 < std::numeric_limits<int32_t>::min()) {
            return nullptr;
          }
        } else if (axis_tensor.type != kTfLiteInt32) {
          return nullptr;
        }
        if (builtin_code == kTfLiteBuiltinArgMax) {
          // NNAPI only supports int32 output.
          auto builtin =
              reinterpret_cast<TfLiteArgMaxParams*>(node->builtin_data);
          if (builtin->output_type != kTfLiteInt32) {
            return nullptr;
          }
          return BasicMappingFn<ANEURALNETWORKS_ARGMAX>;
        } else {
          // NNAPI only supports int32 output.
          auto builtin =
              reinterpret_cast<TfLiteArgMinParams*>(node->builtin_data);
          if (builtin->output_type != kTfLiteInt32) {
            return nullptr;
          }
          return BasicMappingFn<ANEURALNETWORKS_ARGMIN>;
        }
      }
      break;
    case kTfLiteBuiltinMul:
      if (version <= 2) {
        if (!IsFloatOrQuant8Operator(context, node)) {
          return nullptr;
        }
        return [](const NNAPIOpMappingArgs& mapping_args)
                   -> ANeuralNetworksOperationType {
          auto builtin = reinterpret_cast<TfLiteMulParams*>(
              mapping_args.node->builtin_data);
          mapping_args.builder->AddScalarInt32Operand(builtin->activation);
          return ANEURALNETWORKS_MUL;
        };
      }
      break;
    case kTfLiteBuiltinAveragePool2d:
      if (version <= 2) {
        if (!IsFloatOrQuant8Operator(context, node)) {
          return nullptr;
        }
        auto builtin = reinterpret_cast<TfLitePoolParams*>(node->builtin_data);
        // TODO(b/138756912): Large filter window would overflow on the
        // reference CPU path.
        if (!is_accelerator_specified &&
            (builtin->filter_width * builtin->filter_height > 256)) {
          return nullptr;
        }
        return [](const NNAPIOpMappingArgs& mapping_args)
                   -> ANeuralNetworksOperationType {
          mapping_args.builder->AddPoolingParams(
              mapping_args.node->builtin_data);
          return ANEURALNETWORKS_AVERAGE_POOL_2D;
        };
      }
      break;
    case kTfLiteBuiltinMaxPool2d:
      if (version <= 2) {
        if (!IsFloatOrQuant8Operator(context, node)) {
          return nullptr;
        }
        return [](const NNAPIOpMappingArgs& mapping_args)
                   -> ANeuralNetworksOperationType {
          mapping_args.builder->AddPoolingParams(
              mapping_args.node->builtin_data);
          return ANEURALNETWORKS_MAX_POOL_2D;
        };
      }
      break;
    case kTfLiteBuiltinL2Pool2d:
      if (version == 1) {
        if (!IsFloatOperator(context, node)) {
          return nullptr;
        }
        auto builtin = reinterpret_cast<TfLitePoolParams*>(node->builtin_data);
        // Pre-Q devices may not support fused activation for l2_pool.
        if (android_sdk_version < kMinSdkVersionForNNAPI12 &&
            builtin->activation != kTfLiteActNone) {
          return nullptr;
        }
        return [](const NNAPIOpMappingArgs& mapping_args)
                   -> ANeuralNetworksOperationType {
          mapping_args.builder->AddPoolingParams(
              mapping_args.node->builtin_data);
          return ANEURALNETWORKS_L2_POOL_2D;
        };
      }
      break;
    case kTfLiteBuiltinConv2d:
      if (version <= 3) {
        if ((android_sdk_version < kMinSdkVersionForNNAPI12) &&
            (IsHybridOperator(context, builtin_code, node) ||
             !IsFloatOrUint8Operator(context, node))) {
          // Hybrid operators not supported before NNAPI 1.2.
          return nullptr;
        }
        if (android_sdk_version < kMinSdkVersionForNNAPI12) {
          // Per-channel quantized convolution not supported before NNAPI 1.2.
          const auto& filter_tensor = context->tensors[node->inputs->data[1]];
          if (filter_tensor.quantization.type == kTfLiteAffineQuantization) {
            TfLiteAffineQuantization* quantization_params =
                static_cast<TfLiteAffineQuantization*>(
                    filter_tensor.quantization.params);
            if (quantization_params->scale->size > 1) {
              return nullptr;
            }
          }
        }
        const auto input_type = context->tensors[node->inputs->data[0]].type;
        if (android_sdk_version < kMinSdkVersionForNNAPI12 &&
            input_type == kTfLiteUInt8 &&
            !IsRestrictedScalesCompliant(context, node)) {
          return nullptr;
        }
        auto builtin = reinterpret_cast<TfLiteConvParams*>(node->builtin_data);
        if (node->inputs->size != 3) {
          // TODO(b/132950584): Add support for Conv2D with omitted bias
          return nullptr;
        }
        // NNAPI supports dilated Conv2D since NNAPI 1.2.
        if (builtin->dilation_width_factor != 1 ||
            builtin->dilation_height_factor != 1) {
          if (android_sdk_version < kMinSdkVersionForNNAPI12) {
            return nullptr;
          }
          return [](const NNAPIOpMappingArgs& mapping_args)
                     -> ANeuralNetworksOperationType {
            auto builtin = reinterpret_cast<TfLiteConvParams*>(
                mapping_args.node->builtin_data);
            mapping_args.builder->AddScalarInt32Operand(builtin->padding);
            mapping_args.builder->AddScalarInt32Operand(builtin->stride_width);
            mapping_args.builder->AddScalarInt32Operand(builtin->stride_height);
            mapping_args.builder->AddScalarInt32Operand(builtin->activation);
            mapping_args.builder->AddScalarBoolOperand(
                false);  // Use NHWC format
            mapping_args.builder->AddScalarInt32Operand(
                builtin->dilation_width_factor);
            mapping_args.builder->AddScalarInt32Operand(
                builtin->dilation_height_factor);
            return ANEURALNETWORKS_CONV_2D;
          };
        }
        return [](const NNAPIOpMappingArgs& mapping_args)
                   -> ANeuralNetworksOperationType {
          auto builtin = reinterpret_cast<TfLiteConvParams*>(
              mapping_args.node->builtin_data);
          mapping_args.builder->AddScalarInt32Operand(builtin->padding);
          mapping_args.builder->AddScalarInt32Operand(builtin->stride_width);
          mapping_args.builder->AddScalarInt32Operand(builtin->stride_height);
          mapping_args.builder->AddScalarInt32Operand(builtin->activation);
          return ANEURALNETWORKS_CONV_2D;
        };
      }
      break;
    case kTfLiteBuiltinDepthwiseConv2d:
      if (version <= 3) {
        if (android_sdk_version < kMinSdkVersionForNNAPI12 &&
            !IsFloatOrUint8Operator(context, node)) {
          return nullptr;
        }
        const auto input_type = context->tensors[node->inputs->data[0]].type;
        if (android_sdk_version < kMinSdkVersionForNNAPI12 &&
            input_type == kTfLiteUInt8 &&
            !IsRestrictedScalesCompliant(context, node)) {
          return nullptr;
        }
        auto builtin =
            reinterpret_cast<TfLiteDepthwiseConvParams*>(node->builtin_data);
        if (android_sdk_version < kMinSdkVersionForNNAPI12 &&
            (builtin->dilation_width_factor != 1 ||
             builtin->dilation_height_factor != 1)) {
          return nullptr;
        }
        return [](const NNAPIOpMappingArgs& mapping_args)
                   -> ANeuralNetworksOperationType {
          auto builtin = reinterpret_cast<TfLiteDepthwiseConvParams*>(
              mapping_args.node->builtin_data);
          mapping_args.builder->AddScalarInt32Operand(builtin->padding);
          mapping_args.builder->AddScalarInt32Operand(builtin->stride_width);
          mapping_args.builder->AddScalarInt32Operand(builtin->stride_height);
          mapping_args.builder->AddScalarInt32Operand(
              builtin->depth_multiplier);
          mapping_args.builder->AddScalarInt32Operand(builtin->activation);
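          // The dilation operands exist only in the NNAPI 1.2+ signature; the
          // SDK check above already rejects dilated depthwise conv on older
          // versions, so they are appended only when actually needed.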
          if (builtin->dilation_width_factor != 1 ||
              builtin->dilation_height_factor != 1) {
            mapping_args.builder->AddScalarBoolOperand(
                false);  // Use NHWC format
            mapping_args.builder->AddScalarInt32Operand(
                builtin->dilation_width_factor);
            mapping_args.builder->AddScalarInt32Operand(
                builtin->dilation_height_factor);
          }
          return ANEURALNETWORKS_DEPTHWISE_CONV_2D;
        };
      }
      break;
    case kTfLiteBuiltinFullyConnected:
      if (version <= 4) {
        if (node->inputs->size != 3 ||
            node->inputs->data[2] == kOptionalTensor) {
          // TODO(b/132950584): Add support for FullyConnected with no bias.
          return nullptr;
        }
        const auto output_type = context->tensors[node->outputs->data[0]].type;
        if (output_type == kTfLiteInt16) {
          return nullptr;
        }
        if (android_sdk_version < kMinSdkVersionForNNAPI12 &&
            (IsHybridOperator(context, builtin_code, node) ||
             !IsFloatOrUint8Operator(context, node))) {
          // Hybrid operators not supported before NNAPI 1.2.
          return nullptr;
        }
        const auto input_type = context->tensors[node->inputs->data[0]].type;
        if (android_sdk_version < kMinSdkVersionForNNAPI12 &&
            input_type == kTfLiteUInt8 &&
            !IsRestrictedScalesCompliant(context, node)) {
          return nullptr;
        }
        auto builtin =
            reinterpret_cast<TfLiteFullyConnectedParams*>(node->builtin_data);
        if (builtin->keep_num_dims) {
          return nullptr;
        }
        return [](const NNAPIOpMappingArgs& mapping_args)
                   -> ANeuralNetworksOperationType {
          auto builtin = reinterpret_cast<TfLiteFullyConnectedParams*>(
              mapping_args.node->builtin_data);
          mapping_args.builder->AddScalarInt32Operand(builtin->activation);
          return ANEURALNETWORKS_FULLY_CONNECTED;
        };
      }
      break;
    case kTfLiteBuiltinHardSwish:
      // TODO(131260336): Add support for hardswish; at the very least we
      // should decompose it into basic ops, though for some NNAPI
      // accelerators using the optimized TFLite kernels might be even faster.
      return nullptr;

    case kTfLiteBuiltinSoftmax:
      if (version <= 2) {
        const auto& input = context->tensors[node->inputs->data[0]];
        if (!IsFloatOrQuant8Operator(context, node)) {
          return nullptr;
        }
        const int input_rank = input.dims->size;
        if (input_rank > 4) return nullptr;
        // Before API level 29 only 2D and 4D input tensors were supported.
        if (android_sdk_version < kMinSdkVersionForNNAPI12) {
          if (input_rank != 2 && input_rank != 4) return nullptr;
        }
        return [](const NNAPIOpMappingArgs& mapping_args)
                   -> ANeuralNetworksOperationType {
          auto builtin = reinterpret_cast<TfLiteSoftmaxParams*>(
              mapping_args.node->builtin_data);
          mapping_args.builder->AddScalarFloat32Operand(builtin->beta);
          // The optional scalar specifying the axis the softmax is computed
          // over is not added; it defaults to -1 (the last dimension).
          return ANEURALNETWORKS_SOFTMAX;
        };
      }
      break;
    case kTfLiteBuiltinReshape:
      if (version == 1) {
        if (!IsFloatOrQuant8Operator(context, node)) {
          return nullptr;
        }
        // The shape input tensor must be constant.
        if ((node->inputs->size < 2) ||
            (context->tensors[node->inputs->data[1]].allocation_type !=
             kTfLiteMmapRo)) {
          return nullptr;
        }
        return BasicMappingFn<ANEURALNETWORKS_RESHAPE>;
      }
      break;
    case kTfLiteBuiltinResizeBilinear:
      if (version <= 2) {
        const auto& input = context->tensors[node->inputs->data[0]];
        const auto output_dims = context->tensors[node->outputs->data[0]].dims;
        if (input.dims->size != 4) return nullptr;
        if (!IsFloatOrQuant8Operator(context, node)) {
          return nullptr;
        }
        // The size input tensor must be constant.
        if ((node->inputs->size < 2) ||
            (context->tensors[node->inputs->data[1]].allocation_type !=
             kTfLiteMmapRo)) {
          return nullptr;
        }
        if (android_sdk_version < kMinSdkVersionForNNAPI12 &&
            output_dims->data[1] != output_dims->data[2]) {
          // Require width == height due to driver differences in NNAPI < 1.2
          return nullptr;
        }
        auto builtin =
            reinterpret_cast<TfLiteResizeBilinearParams*>(node->builtin_data);
        if (builtin->align_corners) {
          // NNAPI does not support align_corners == true.
          return nullptr;
        }
        if (android_sdk_version < kMinSdkVersionForNNAPI12 &&
            input.type != kTfLiteFloat32) {
          // NNAPI 1.0 and 1.1 only support float input.
          return nullptr;
        }
        return [](const NNAPIOpMappingArgs& mapping_args)
                   -> ANeuralNetworksOperationType {
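          // NNAPI takes the new width and height as scalar operands, read
          // here from the output tensor's NHWC shape.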
          const int output_id = mapping_args.node->outputs->data[0];
          auto& output = mapping_args.context->tensors[output_id];
          const int output_height = output.dims->data[1];
          const int output_width = output.dims->data[2];
          mapping_args.builder->AddScalarInt32Operand(output_width);
          mapping_args.builder->AddScalarInt32Operand(output_height);
          return ANEURALNETWORKS_RESIZE_BILINEAR;
        };
      }
      break;
    case kTfLiteBuiltinResizeNearestNeighbor: {
      if (version > 2 || android_sdk_version < kMinSdkVersionForNNAPI12) {
        return nullptr;
      }
      if (!IsFloatOrQuant8Operator(context, node)) {
        return nullptr;
      }
      auto builtin = reinterpret_cast<TfLiteResizeNearestNeighborParams*>(
          node->builtin_data);
      if (builtin->align_corners) {
        // NNAPI does not support align_corners == true.
        return nullptr;
      }
      return [](const NNAPIOpMappingArgs& mapping_args)
                 -> ANeuralNetworksOperationType {
        const TfLiteTensor& new_shape =
            mapping_args.context->tensors[mapping_args.node->inputs->data[1]];
        // NNAPI takes the output width and height as scalar operands;
        // TFLite's new_shape tensor is laid out as {height, width}.
        mapping_args.builder->AddScalarInt32Operand(new_shape.data.i32[1]);
        mapping_args.builder->AddScalarInt32Operand(new_shape.data.i32[0]);
        mapping_args.builder->AddScalarBoolOperand(false);  // Use NHWC format

        return ANEURALNETWORKS_RESIZE_NEAREST_NEIGHBOR;
      };
    } break;
    case kTfLiteBuiltinSqueeze:
      if (version == 1 && android_sdk_version >= kMinSdkVersionForNNAPI11) {
        auto builtin =
            reinterpret_cast<TfLiteSqueezeParams*>(node->builtin_data);
        if (android_sdk_version == kMinSdkVersionForNNAPI11 &&
            builtin->num_squeeze_dims == 0) {
          // NNAPI 1.1 does not support null squeeze_dims properly.
          return nullptr;
        }
        return [](const NNAPIOpMappingArgs& mapping_args)
                   -> ANeuralNetworksOperationType {
          auto builtin = reinterpret_cast<TfLiteSqueezeParams*>(
              mapping_args.node->builtin_data);
          // Note that we add the squeeze dimensions even if the dimensions
          // were unspecified (empty), as NNAPI requires the operand.
          mapping_args.builder->AddVectorInt32Operand(
              builtin->num_squeeze_dims ? builtin->squeeze_dims : nullptr,
              static_cast<uint32_t>(builtin->num_squeeze_dims));
          return ANEURALNETWORKS_SQUEEZE;
        };
      }
      break;
    case kTfLiteBuiltinUnidirectionalSequenceLstm:
      if (version <= 2 && android_sdk_version >= kMinSdkVersionForNNAPI12) {
        if (IsHybridOperator(context, builtin_code, node)) {
          // Hybrid version of this op is not supported by NN API.
          return nullptr;
        }
        if (node->inputs->size != 20 && node->inputs->size != 24) {
          return nullptr;
        }
        return [](const NNAPIOpMappingArgs& mapping_args)
                   -> ANeuralNetworksOperationType {
          auto builtin =
              reinterpret_cast<TfLiteUnidirectionalSequenceLSTMParams*>(
                  mapping_args.node->builtin_data);
          mapping_args.builder->AddScalarInt32Operand(builtin->activation);
          mapping_args.builder->AddScalarFloat32Operand(builtin->cell_clip);
          mapping_args.builder->AddScalarFloat32Operand(builtin->proj_clip);
          mapping_args.builder->AddScalarBoolOperand(builtin->time_major);
          const bool hybrid_op = IsHybridOperator(
              mapping_args.context, kTfLiteBuiltinUnidirectionalSequenceLstm,
              mapping_args.node);
          if (mapping_args.node->inputs->size == 24) {
            // Add layer normalization tensors if they are provided.
            for (int i = 20; i < 24; ++i) {
              const int input_index = mapping_args.node->inputs->data[i];
              if (input_index != kOptionalTensor) {
                mapping_args.builder->AddTensorInput(input_index, hybrid_op);
              } else {
                mapping_args.builder->AddVectorFloat32Operand(nullptr, 0);
              }
            }
          } else {
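            // No layer normalization: add four empty placeholder operands
            // instead.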
            for (int i = 0; i < 4; ++i) {
              mapping_args.builder->AddVectorFloat32Operand(nullptr, 0);
            }
          }

          return ANEURALNETWORKS_UNIDIRECTIONAL_SEQUENCE_LSTM;
        };
      }
      break;
    case kTfLiteBuiltinL2Normalization: {
      if (version <= 2) {
        const auto& input = context->tensors[node->inputs->data[0]];
        if (android_sdk_version < kMinSdkVersionForNNAPI12 &&
            (!IsFloatOperator(context, node) || input.dims->size != 4)) {
          return nullptr;
        }
        auto builtin =
            reinterpret_cast<TfLiteL2NormParams*>(node->builtin_data);
        if (builtin->activation == kTfLiteActNone) {
          return BasicMappingFn<ANEURALNETWORKS_L2_NORMALIZATION>;
        }
      }
      break;
    }
    case kTfLiteBuiltinLocalResponseNormalization:
      if (version == 1) {
        return [](const NNAPIOpMappingArgs& mapping_args)
                   -> ANeuralNetworksOperationType {
          auto builtin = reinterpret_cast<TfLiteLocalResponseNormParams*>(
              mapping_args.node->builtin_data);
          mapping_args.builder->AddScalarInt32Operand(builtin->radius);
          mapping_args.builder->AddScalarFloat32Operand(builtin->bias);
          mapping_args.builder->AddScalarFloat32Operand(builtin->alpha);
          mapping_args.builder->AddScalarFloat32Operand(builtin->beta);
          return ANEURALNETWORKS_LOCAL_RESPONSE_NORMALIZATION;
        };
      }
      break;
    case kTfLiteBuiltinLshProjection:
      if (version == 1) {
        if (reinterpret_cast<TfLiteLSHProjectionParams*>(node->builtin_data)
                ->type == kTfLiteLshProjectionSparse) {
          // NNAPI does not support sparse projection correctly pre-Q
          // (b/111751836).
          if (android_sdk_version < kMinSdkVersionForNNAPI12) {
            return nullptr;
          }
          // NNAPI does not support weights for sparse projection.
          if (node->inputs->size != 2) {
            return nullptr;
          }
        }
        return [](const NNAPIOpMappingArgs& mapping_args)
                   -> ANeuralNetworksOperationType {
          auto builtin = reinterpret_cast<TfLiteLSHProjectionParams*>(
              mapping_args.node->builtin_data);
          int type = builtin->type;
          // In Android Q+, NNAPI uses 3 to denote
          // kTfLiteLshProjectionSparse.
          const int kNNAPILshProjectionSparse = 3;
          if (builtin->type == kTfLiteLshProjectionSparse) {
            type = kNNAPILshProjectionSparse;
            // Add NNAPI null weight operand.
            mapping_args.builder->AddVectorFloat32Operand(nullptr, 0);
          }
          mapping_args.builder->AddScalarInt32Operand(type);
          return ANEURALNETWORKS_LSH_PROJECTION;
        };
      }
      break;
    case kTfLiteBuiltinConcatenation:
      if (version <= 2 &&
          reinterpret_cast<TfLiteConcatenationParams*>(node->builtin_data)
                  ->activation == kTfLiteActNone &&
          context->tensors[node->inputs->data[0]].dims->size <= 4) {
        if (context->tensors[node->inputs->data[0]].type == kTfLiteUInt8 &&
            android_sdk_version < kMinSdkVersionForNNAPI12) {
          // NNAPI 1.0 and 1.1 only support concatenating quantized tensors
          // with the same scale and zero point.
          auto first_param = context->tensors[node->inputs->data[0]].params;
          for (int i = 1; i < node->inputs->size; i++) {
            auto curr_param = context->tensors[node->inputs->data[i]].params;
            if (curr_param.scale != first_param.scale ||
                curr_param.zero_point != first_param.zero_point) {
              return nullptr;
            }
          }
        }
        return [](const NNAPIOpMappingArgs& mapping_args)
                   -> ANeuralNetworksOperationType {
          auto builtin = reinterpret_cast<TfLiteConcatenationParams*>(
              mapping_args.node->builtin_data);
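          // Normalize a negative TFLite axis into the non-negative axis NNAPI
          // expects.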
          int axis = builtin->axis < 0
                         ? mapping_args.context
                                   ->tensors[mapping_args.node->inputs->data[0]]
                                   .dims->size +
                               builtin->axis
                         : builtin->axis;
          mapping_args.builder->AddScalarInt32Operand(axis);
          return ANEURALNETWORKS_CONCATENATION;
        };
      }
      break;
    case kTfLiteBuiltinDequantize:
      if (version == 1 || version == 2) {
        const auto& input = context->tensors[node->inputs->data[0]];
        if (input.type == kTfLiteFloat16) {
          return nullptr;
        }
        const auto zero_point = input.params.zero_point;
        // NN API supports the int8 type since version 1.2, but only for
        // symmetric quantization.
        if (input.type == kTfLiteInt8 &&
            (zero_point != 0 ||
             android_sdk_version < kMinSdkVersionForNNAPI12)) {
          return nullptr;
        }
        return BasicMappingFn<ANEURALNETWORKS_DEQUANTIZE>;
      }
      break;
    case kTfLiteBuiltinFloor:
      if (version == 1) {
        return BasicMappingFn<ANEURALNETWORKS_FLOOR>;
      }
      break;
    case kTfLiteBuiltinRelu:
      if (version == 1) {
        if (!IsFloatOrQuant8Operator(context, node)) {
          return nullptr;
        }
        return BasicMappingFn<ANEURALNETWORKS_RELU>;
      }
      break;
    case kTfLiteBuiltinReluN1To1:
      if (version == 1) {
        if (!IsFloatOrQuant8Operator(context, node)) {
          return nullptr;
        }
        return BasicMappingFn<ANEURALNETWORKS_RELU1>;
      }
      break;
    case kTfLiteBuiltinRelu6:
      if (version == 1) {
        if (!IsFloatOrQuant8Operator(context, node)) {
          return nullptr;
        }
        return BasicMappingFn<ANEURALNETWORKS_RELU6>;
      }
      break;
    case kTfLiteBuiltinLogistic:
      if (version <= 2) {
        if (!IsFloatOrQuant8Operator(context, node)) {
          return nullptr;
        }
        return BasicMappingFn<ANEURALNETWORKS_LOGISTIC>;
      }
      break;
    case kTfLiteBuiltinTanh:
      if (version <= 2) {
        const TfLiteType input_type =
            context->tensors[node->inputs->data[0]].type;
        if (IsFloat(input_type) ||
            (IsQuantized(input_type) &&
             android_sdk_version >= kMinSdkVersionForNNAPI12)) {
          // Float tanh is supported on all NNAPI versions; quantized tanh
          // requires NNAPI 1.2+.
          return BasicMappingFn<ANEURALNETWORKS_TANH>;
        }
      }
      break;
    case kTfLiteBuiltinSub:
      if (version <= 2) {
        const TfLiteType input_type =
            context->tensors[node->inputs->data[0]].type;
        if ((android_sdk_version >= kMinSdkVersionForNNAPI11 &&
             IsFloat(input_type)) ||
            (android_sdk_version >= kMinSdkVersionForNNAPI12 &&
             IsQuantized(input_type))) {
          // Float sub requires NNAPI 1.1+; quantized sub requires NNAPI 1.2+.
          return [](const NNAPIOpMappingArgs& mapping_args)
                     -> ANeuralNetworksOperationType {
            auto builtin = reinterpret_cast<TfLiteSubParams*>(
                mapping_args.node->builtin_data);
            mapping_args.builder->AddScalarInt32Operand(builtin->activation);
            return ANEURALNETWORKS_SUB;
          };
        }
      }
      break;
    case kTfLiteBuiltinDiv:
      if (version == 1 && android_sdk_version >= kMinSdkVersionForNNAPI11 &&
          context->tensors[node->inputs->data[0]].type == kTfLiteFloat32) {
        // NNAPI only supports float div.
        return [](const NNAPIOpMappingArgs& mapping_args)
                   -> ANeuralNetworksOperationType {
          auto builtin = reinterpret_cast<TfLiteDivParams*>(
              mapping_args.node->builtin_data);
          mapping_args.builder->AddScalarInt32Operand(builtin->activation);
          return ANEURALNETWORKS_DIV;
        };
      }
      break;
    case kTfLiteBuiltinPad:
    case kTfLiteBuiltinPadv2: {
      if (version <= 2 && IsFloatOrQuant8Operator(context, node)) {
        const TfLiteIntArrayView input_shape(
            context->tensors[node->inputs->data[0]].dims);
        if (HasZeroes(input_shape)) {
          // NN API pad ops do not support input tensors with no elements
          return nullptr;
        }
        if (node->inputs->size == 2 &&
            android_sdk_version >= kMinSdkVersionForNNAPI11 &&
            (context->tensors[node->inputs->data[0]].type == kTfLiteFloat32 ||
             android_sdk_version >= kMinSdkVersionForNNAPI12)) {
          // NNAPI does not support specifying the padding value. Before 1.2,
          // NNAPI pads with a literal zero for quantized tensors, so only
          // delegate float pad to NNAPI. NNAPI 1.2 onwards pads with the
          // zero point, so delegate quantized pad as well.
          return BasicMappingFn<ANEURALNETWORKS_PAD>;
        } else if (node->inputs->size == 3 &&
                   android_sdk_version >= kMinSdkVersionForNNAPI12) {
          const int constant_value_id = node->inputs->data[2];
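          // If the constant value tensor is omitted, plain PAD (zero-point
          // padding) is sufficient; otherwise use PAD_V2.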
          if (constant_value_id == kOptionalTensor) {
            return BasicMappingFn<ANEURALNETWORKS_PAD>;
          }
          return BasicMappingFn<ANEURALNETWORKS_PAD_V2>;
        }
      }
    } break;
    case kTfLiteBuiltinUnidirectionalSequenceRnn:
      if (version == 1 && android_sdk_version >= kMinSdkVersionForNNAPI12) {
        if (IsHybridOperator(context, builtin_code, node)) {
          // Hybrid version of this op is not supported by NN API.
          return nullptr;
        }
        return [](const NNAPIOpMappingArgs& mapping_args)
                   -> ANeuralNetworksOperationType {
          auto builtin = reinterpret_cast<TfLiteSequenceRNNParams*>(
              mapping_args.node->builtin_data);
          mapping_args.builder->AddScalarInt32Operand(builtin->activation);
          mapping_args.builder->AddScalarInt32Operand(builtin->time_major);
          return ANEURALNETWORKS_UNIDIRECTIONAL_SEQUENCE_RNN;
        };
      }
      break;
    case kTfLiteBuiltinSpaceToBatchNd:
      if (version <= 2 && android_sdk_version >= kMinSdkVersionForNNAPI11) {
        return BasicMappingFn<ANEURALNETWORKS_SPACE_TO_BATCH_ND>;
      }
      break;
    case kTfLiteBuiltinBatchToSpaceNd:
      if (version <= 2 && android_sdk_version >= kMinSdkVersionForNNAPI11) {
        auto crops = context->tensors[node->inputs->data[2]];
        auto crops_data = crops.data.i32;
        // NNAPI's BATCH_TO_SPACE_ND has no crops input, so only delegate when
        // all four crop values are zero.
        if (!crops_data || crops.bytes != 16 || crops_data[0] != 0 ||
            crops_data[1] != 0 || crops_data[2] != 0 || crops_data[3] != 0) {
          return nullptr;
        }
        return BasicMappingFn<ANEURALNETWORKS_BATCH_TO_SPACE_ND>;
      }
      break;
    case kTfLiteBuiltinStridedSlice:
      if (version <= 2 && android_sdk_version >= kMinSdkVersionForNNAPI11) {
        return [](const NNAPIOpMappingArgs& mapping_args)
                   -> ANeuralNetworksOperationType {
          auto builtin = reinterpret_cast<TfLiteStridedSliceParams*>(
              mapping_args.node->builtin_data);
          mapping_args.builder->AddScalarInt32Operand(builtin->begin_mask);
          mapping_args.builder->AddScalarInt32Operand(builtin->end_mask);
          mapping_args.builder->AddScalarInt32Operand(
              builtin->shrink_axis_mask);
          return ANEURALNETWORKS_STRIDED_SLICE;
        };
      }
      break;
    case kTfLiteBuiltinTranspose:
      // Note that the permutation input tensor value dictates the output
      // dimensions.
      // TODO(b/110888333): Support dynamically-sized tensors in delegates.
      if ((version <= 2) && (android_sdk_version >= kMinSdkVersionForNNAPI11) &&
          (node->inputs->size > 1) &&
          (context->tensors[node->inputs->data[1]].allocation_type ==
           kTfLiteMmapRo)) {
        return BasicMappingFn<ANEURALNETWORKS_TRANSPOSE>;
      }
      break;
    case kTfLiteBuiltinAbs:
      // NN API only supports float inputs to this op.
      if (version == 1 && android_sdk_version >= kMinSdkVersionForNNAPI12 &&
          IsFloat(context->tensors[node->inputs->data[0]].type)) {
        return BasicMappingFn<ANEURALNETWORKS_ABS>;
      }
      break;
    case kTfLiteBuiltinExp:
      // NN API only supports float inputs to this op.
      if (version == 1 && android_sdk_version >= kMinSdkVersionForNNAPI12 &&
          IsFloat(context->tensors[node->inputs->data[0]].type)) {
        return BasicMappingFn<ANEURALNETWORKS_EXP>;
      }
      break;
    case kTfLiteBuiltinLog:
      // NN API only supports float inputs to this op.
      if (version == 1 && android_sdk_version >= kMinSdkVersionForNNAPI12 &&
          IsFloat(context->tensors[node->inputs->data[0]].type)) {
        return BasicMappingFn<ANEURALNETWORKS_LOG>;
      }
      break;
    case kTfLiteBuiltinRsqrt:
      // NN API only supports float inputs to this op.
      if (version == 1 && android_sdk_version >= kMinSdkVersionForNNAPI12 &&
          IsFloatOperator(context, node)) {
        return BasicMappingFn<ANEURALNETWORKS_RSQRT>;
      }
      break;
    case kTfLiteBuiltinPow:
      // NN API only supports float inputs to this op.
      if (version == 1 && android_sdk_version >= kMinSdkVersionForNNAPI12 &&
          IsFloat(context->tensors[node->inputs->data[0]].type)) {
        return BasicMappingFn<ANEURALNETWORKS_POW>;
      }
      break;
    case kTfLiteBuiltinSlice: {
      const auto input_type = context->tensors[node->inputs->data[0]].type;
      const auto begin_type = context->tensors[node->inputs->data[1]].type;
      const auto size_type = context->tensors[node->inputs->data[2]].type;
      if (version <= 2 && android_sdk_version >= kMinSdkVersionForNNAPI12 &&
          (input_type == kTfLiteFloat32 || input_type == kTfLiteInt32 ||
           input_type == kTfLiteUInt8 || input_type == kTfLiteInt8) &&
          begin_type == kTfLiteInt32 && size_type == kTfLiteInt32) {
        return BasicMappingFn<ANEURALNETWORKS_SLICE>;
      }
    } break;
    case kTfLiteBuiltinSin:
      if (version == 1 && android_sdk_version >= kMinSdkVersionForNNAPI12 &&
          IsFloat(context->tensors[node->inputs->data[0]].type)) {
        return BasicMappingFn<ANEURALNETWORKS_SIN>;
      }
      break;
    case kTfLiteBuiltinTransposeConv:
      if (version == 1 && android_sdk_version >= kMinSdkVersionForNNAPI12) {
        return [](const NNAPIOpMappingArgs& mapping_args)
                   -> ANeuralNetworksOperationType {
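          // TFLite's TransposeConv inputs are (output_shape, weights, data);
          // NNAPI expects (input, filter, bias, output_shape, ...), so the
          // tensors are re-ordered here and a zero bias is synthesized below.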
          const bool hybrid_op =
              IsHybridOperator(mapping_args.context,
                               kTfLiteBuiltinTransposeConv, mapping_args.node);
          mapping_args.builder->AddTensorInput(
              mapping_args.node->inputs->data[/* kDataInputTensor */ 2],
              hybrid_op);

          mapping_args.builder->AddTensorInput(
              mapping_args.node->inputs->data[/* kWeightsTensor */ 1],
              hybrid_op);

          // NNAPI requires a bias tensor, so we allocate a new tensor and
          // fill it with zeroes. It is deleted along with the other tensors
          // in the context when the subgraph is destroyed.
          int bias_index = -1;
          mapping_args.context->AddTensors(mapping_args.context, 1,
                                           &bias_index);
          TfLiteTensor* bias_tensor =
              &mapping_args.context->tensors[bias_index];
          const auto input_type =
              mapping_args.context
                  ->tensors[mapping_args.node->inputs
                                ->data[/*kDataInputTensor*/ 2]]
                  .type;
          if (input_type == kTfLiteFloat32) {
            bias_tensor->type = kTfLiteFloat32;
          } else {
            bias_tensor->type = kTfLiteInt32;
          }

          // Create an array with a required bias shape and resize the bias
          // tensor.
          TfLiteIntArray* bias_shape = TfLiteIntArrayCreate(1);
          const TfLiteTensor& output_shape =
              mapping_args.context->tensors
                  [mapping_args.node->inputs->data[/*kOutputShapeTensor*/ 0]];
          const int output_depth = output_shape.data.i32[3];
          bias_shape->data[0] = output_depth;
          bias_tensor->allocation_type = kTfLiteDynamic;
          mapping_args.context->ResizeTensor(mapping_args.context, bias_tensor,
                                             bias_shape);

          // Set the tensor's values to zeroes and add it using AddVector*, so
          // that the values are copied to NNAPI. We don't use the AddTensor
          // function because it doesn't copy values and the tensor we just
          // created is not in node->inputs.
          if (input_type == kTfLiteFloat32) {
            memset(bias_tensor->data.f, 0, output_depth * sizeof(float));
            mapping_args.builder->AddVectorFloat32Operand(bias_tensor->data.f,
                                                          output_depth);
          } else {
            memset(bias_tensor->data.i32, 0, output_depth * sizeof(int));
            const TfLiteTensor& input_tensor =
                mapping_args.context->tensors
                    [mapping_args.node->inputs->data[/*kDataInputTensor*/ 2]];
            const TfLiteTensor& filter_tensor =
                mapping_args.context->tensors[mapping_args.node->inputs
                                                  ->data[/*kWeightsTensor*/ 1]];
            // NNAPI requires the bias scale to be the product of the input
            // scale and the filter scale.
            bias_tensor->params.scale =
                input_tensor.params.scale * filter_tensor.params.scale;
            mapping_args.builder->AddVectorInt32Operand(
                bias_tensor->data.i32, output_depth,
                input_tensor.params.scale * filter_tensor.params.scale,
                /*zero_point=*/0);
          }

          mapping_args.builder->AddTensorInput(
              mapping_args.node->inputs->data[/* kOutputShapeTensor */ 0],
              hybrid_op);

          auto builtin = reinterpret_cast<TfLiteTransposeConvParams*>(
              mapping_args.node->builtin_data);
          mapping_args.builder->AddScalarInt32Operand(builtin->padding);
          mapping_args.builder->AddScalarInt32Operand(builtin->stride_width);
          mapping_args.builder->AddScalarInt32Operand(builtin->stride_height);
          mapping_args.builder->AddScalarInt32Operand(
              /*ANEURALNETWORKS_FUSED_NONE*/ 0);
          // Use NHWC layout for input and output
          mapping_args.builder->AddScalarBoolOperand(false);
          return ANEURALNETWORKS_TRANSPOSE_CONV;
        };
      }
      break;
    case kTfLiteBuiltinSqrt:
      if (version == 1 && android_sdk_version >= kMinSdkVersionForNNAPI12 &&
          IsFloat(context->tensors[node->inputs->data[0]].type)) {
        return BasicMappingFn<ANEURALNETWORKS_SQRT>;
      }
      break;
    case kTfLiteBuiltinRnn:
      // NNAPI only supports float32 weights.
      if (version == 1 && node->inputs->size == 5 &&
          context->tensors[node->inputs->data[/*kWeightsTensor*/ 1]].type ==
              kTfLiteFloat32) {
        return [](const NNAPIOpMappingArgs& mapping_args)
                   -> ANeuralNetworksOperationType {
          // NNAPI needs both state_in and state_out.
          int ann_index;
          mapping_args.builder->AddStateFloat32Tensor(
              mapping_args.node->inputs->data[/*kHiddenStateTensor*/ 4],
              &ann_index);
          mapping_args.model_state_outputs->push_back(ann_index);
          mapping_args.model_state_tfl_inputs->push_back(
              mapping_args.node->inputs->data[/*kHiddenStateTensor*/ 4]);
          auto builtin = reinterpret_cast<TfLiteRNNParams*>(
              mapping_args.node->builtin_data);
          mapping_args.builder->AddScalarInt32Operand(builtin->activation);
          return ANEURALNETWORKS_RNN;
        };
      }
      break;
    case kTfLiteBuiltinSpaceToDepth: {
      const TfLiteType input_type =
          context->tensors[node->inputs->data[0]].type;
      if (version <= 2 &&
          (input_type == kTfLiteFloat32 || input_type == kTfLiteUInt8 ||
           input_type == kTfLiteInt8)) {
        return [](const NNAPIOpMappingArgs& mapping_args)
                   -> ANeuralNetworksOperationType {
          auto builtin = reinterpret_cast<TfLiteSpaceToDepthParams*>(
              mapping_args.node->builtin_data);
          mapping_args.builder->AddScalarInt32Operand(builtin->block_size);
          return ANEURALNETWORKS_SPACE_TO_DEPTH;
        };
      }
    } break;
    case kTfLiteBuiltinDepthToSpace: {
      const TfLiteType input_type =
          context->tensors[node->inputs->data[0]].type;
      if (version <= 1 &&
          (input_type == kTfLiteFloat32 || input_type == kTfLiteUInt8 ||
           input_type == kTfLiteInt8)) {
        return [](const NNAPIOpMappingArgs& mapping_args)
                   -> ANeuralNetworksOperationType {
          auto builtin = reinterpret_cast<TfLiteDepthToSpaceParams*>(
              mapping_args.node->builtin_data);
          mapping_args.builder->AddScalarInt32Operand(builtin->block_size);
          return ANEURALNETWORKS_DEPTH_TO_SPACE;
        };
      }
    } break;
    case kTfLiteBuiltinSvdf:
      // NNAPI only supports float32 weights.
      // Only delegate to NNAPI 1.1+, as SVDF does not support rank > 1 on
      // 1.0.
      if (version == 1 && node->inputs->size == 5 &&
          android_sdk_version >= kMinSdkVersionForNNAPI11 &&
          context->tensors[node->inputs->data[/*kWeightsFeatureTensor*/ 1]]
                  .type == kTfLiteFloat32) {
        return [](const NNAPIOpMappingArgs& mapping_args)
                   -> ANeuralNetworksOperationType {
          // NNAPI needs both state_in and state_out.
          int ann_index;
          mapping_args.builder->AddStateFloat32Tensor(
              mapping_args.node->inputs
                  ->data[/*kInputActivationStateTensor*/ 4],
              &ann_index);
          mapping_args.model_state_outputs->push_back(ann_index);
          mapping_args.model_state_tfl_inputs->push_back(
              mapping_args.node->inputs
                  ->data[/*kInputActivationStateTensor*/ 4]);

          auto builtin = reinterpret_cast<TfLiteSVDFParams*>(
              mapping_args.node->builtin_data);
          mapping_args.builder->AddScalarInt32Operand(builtin->rank);
          mapping_args.builder->AddScalarInt32Operand(builtin->activation);
          return ANEURALNETWORKS_SVDF;
        };
      }
      break;
    case kTfLiteBuiltinLstm:
      // TODO(miaowang): add logging to indicate why the op is rejected.
      if (version <= 3) {
        if (android_sdk_version < kMinSdkVersionForNNAPI11) {
          // Only delegate to NNAPI 1.1+, as 1.0 has a bug with optional
          // tensors that affects LSTM.
          return nullptr;
        }
        if (android_sdk_version < kMinSdkVersionForNNAPI12 &&
            IsHybridOperator(context, builtin_code, node)) {
          // Hybrid operators not supported before NNAPI 1.2.
          return nullptr;
        }

        const auto weight_input_index =
            isLstmBasicKernel(node) ? 2 /*  basic::kInputWeights */
                                    : 4 /* full::kInputToOutputWeightsTensor */;

        const TfLiteType weight_type =
            context->tensors[node->inputs->data[weight_input_index]].type;

        if (isLstmBasicKernel(node)) {
          if (weight_type != kTfLiteUInt8) {
            return nullptr;
          }
          const auto input_quantization_params =
              context->tensors[node->inputs->data[0]].params;
          if (input_quantization_params.scale != 1. / 128. ||
              input_quantization_params.zero_point != 128) {
            return nullptr;
          }

          const auto output_quantization_params =
              context->tensors[node->outputs->data[0]].params;
          if (output_quantization_params.scale != 1. / 128. ||
              output_quantization_params.zero_point != 128) {
            return nullptr;
          }

          const auto cell_state_quantization_params =
              context->tensors[node->outputs->data[1]].params;
          if (cell_state_quantization_params.scale != 16. / 32768. ||
              cell_state_quantization_params.zero_point != 0) {
            return nullptr;
          }

          auto is_const_tensor = [&node, &context](int tensor_idx) {
            return context->tensors[node->inputs->data[tensor_idx]]
                       .allocation_type == kTfLiteMmapRo;
          };

          if (!is_const_tensor(2 /* kInputWeights */)) {
            return nullptr;
          }

          if (!is_const_tensor(3 /* kInputBiases */)) {
            return nullptr;
          }

          return [](const NNAPIOpMappingArgs& mapping_args)
                     -> ANeuralNetworksOperationType {
            const auto output_dims =
                mapping_args.context
                    ->tensors[mapping_args.node->outputs->data[1]]
                    .dims;

            // Inputs kInputData
            mapping_args.builder->AddTensorInput(
                mapping_args.node->inputs->data[0 /* kInputData */],
                /* hybrid_op */ false,
                /* scalar_as_tensor */ false);

            // The eight weight tensors are built by decomposing the
            // kInputWeights param.
            const auto weight_tensor =
                mapping_args.context->tensors
                    [mapping_args.node->inputs->data[2 /* kInputWeights */]];

            std::vector<uint8_t> recurrent_to_input;
            std::vector<uint8_t> input_to_input;
            std::vector<uint8_t> recurrent_to_cell;
            std::vector<uint8_t> input_to_cell;
            std::vector<uint8_t> recurrent_to_forget;
            std::vector<uint8_t> input_to_forget;
            std::vector<uint8_t> recurrent_to_output;
            std::vector<uint8_t> input_to_output;
            tflite::delegate::nnapi::DecomposeQuantLstmWeightsTensor(
                weight_tensor.data.uint8, weight_tensor.dims,
                &recurrent_to_input, &input_to_input, &recurrent_to_cell,
                &input_to_cell, &recurrent_to_forget, &input_to_forget,
                &recurrent_to_output, &input_to_output);

            TfLiteIntArray* recurrent_weight_dims = TfLiteIntArrayCreate(2);
            TfLiteIntArray* input_weight_dims = TfLiteIntArrayCreate(2);
            tflite::delegate::nnapi::SetWeightSubmatrixDims(
                weight_tensor.dims, recurrent_weight_dims, input_weight_dims);

            int new_tensor_index = -1;
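            // Add the eight weight submatrices as constant inputs: the four
            // input-to-gate matrices first, then the four recurrent-to-gate
            // matrices.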

            mapping_args.builder->AddNewInputConstantTensor<uint8_t>(
                ANEURALNETWORKS_TENSOR_QUANT8_ASYMM, kTfLiteUInt8,
                input_weight_dims, input_to_input, weight_tensor.params,
                &new_tensor_index);

            mapping_args.builder->AddNewInputConstantTensor<uint8_t>(
                ANEURALNETWORKS_TENSOR_QUANT8_ASYMM, kTfLiteUInt8,
                input_weight_dims, input_to_forget, weight_tensor.params,
                &new_tensor_index);

            mapping_args.builder->AddNewInputConstantTensor<uint8_t>(
                ANEURALNETWORKS_TENSOR_QUANT8_ASYMM, kTfLiteUInt8,
                input_weight_dims, input_to_cell, weight_tensor.params,
                &new_tensor_index);

            mapping_args.builder->AddNewInputConstantTensor<uint8_t>(
                ANEURALNETWORKS_TENSOR_QUANT8_ASYMM, kTfLiteUInt8,
                input_weight_dims, input_to_output, weight_tensor.params,
                &new_tensor_index);

            mapping_args.builder->AddNewInputConstantTensor<uint8_t>(
                ANEURALNETWORKS_TENSOR_QUANT8_ASYMM, kTfLiteUInt8,
                recurrent_weight_dims, recurrent_to_input, weight_tensor.params,
                &new_tensor_index);

            mapping_args.builder->AddNewInputConstantTensor<uint8_t>(
                ANEURALNETWORKS_TENSOR_QUANT8_ASYMM, kTfLiteUInt8,
                recurrent_weight_dims, recurrent_to_forget,
                weight_tensor.params, &new_tensor_index);

            mapping_args.builder->AddNewInputConstantTensor<uint8_t>(
                ANEURALNETWORKS_TENSOR_QUANT8_ASYMM, kTfLiteUInt8,
                recurrent_weight_dims, recurrent_to_cell, weight_tensor.params,
                &new_tensor_index);

            mapping_args.builder->AddNewInputConstantTensor<uint8_t>(
                ANEURALNETWORKS_TENSOR_QUANT8_ASYMM, kTfLiteUInt8,
                recurrent_weight_dims, recurrent_to_output,
                weight_tensor.params, &new_tensor_index);

            TfLiteIntArrayFree(input_weight_dims);
            TfLiteIntArrayFree(recurrent_weight_dims);

            // The biases have to be split into four.
            const auto bias_size = output_dims->data[1];
            const TfLiteTensor& biases_tensor =
                mapping_args.context->tensors[mapping_args.node->inputs
                                                  ->data[3 /* kInputBiases */]];

            std::vector<int32_t> input_bias;
            std::vector<int32_t> cell_bias;
            std::vector<int32_t> forget_bias;
            std::vector<int32_t> output_bias;
            delegate::nnapi::DecomposeBiasTensor(
                biases_tensor.data.i32, bias_size, &input_bias, &cell_bias,
                &forget_bias, &output_bias);

            int input_bias_tensor = -1;
            mapping_args.builder->AddNewInputConstantTensor<int32_t>(
                ANEURALNETWORKS_TENSOR_INT32, kTfLiteInt32, {bias_size},
                input_bias, biases_tensor.params, &input_bias_tensor);
            int forget_bias_tensor = -1;
            mapping_args.builder->AddNewInputConstantTensor(
                ANEURALNETWORKS_TENSOR_INT32, kTfLiteInt32, {bias_size},
                forget_bias, biases_tensor.params, &forget_bias_tensor);
            int cell_gate_bias_tensor = -1;
            mapping_args.builder->AddNewInputConstantTensor(
                ANEURALNETWORKS_TENSOR_INT32, kTfLiteInt32, {bias_size},
                cell_bias, biases_tensor.params, &cell_gate_bias_tensor);
            int output_gate_bias_tensor = -1;
            mapping_args.builder->AddNewInputConstantTensor(
                ANEURALNETWORKS_TENSOR_INT32, kTfLiteInt32, {bias_size},
                output_bias, biases_tensor.params, &output_gate_bias_tensor);

            mapping_args.builder->AddTensorInput(
                mapping_args.node->inputs->data[4 /* kInputPrevState */],
                /* hybrid_op */ false,
                /* scalar_as_tensor */ false);

            // kInputPrevActivation
            mapping_args.builder->AddTensorInput(
                mapping_args.node->inputs->data[1 /* kInputPrevActivation */],
                /* hybrid_op */ false,
                /* scalar_as_tensor */ false);

            // Configure copying the activation and state outputs back to
            // their associated inputs.
            mapping_args.feedback_loops->push_back(std::make_tuple(
                0 /*kOutputActivation*/, 1 /*kInputPrevActivation*/));

            mapping_args.feedback_loops->push_back(
                std::make_tuple(1 /*kOutputState*/, 4 /*kInputPrevState*/));

            // OUTPUTS
            // Set only the first two, since the remaining ones are ignored
            // by NNAPI.
            mapping_args.builder->AddTensorOutput(
                mapping_args.node->outputs->data[1 /* kOutputState */], 0);

            mapping_args.builder->AddTensorOutput(
                mapping_args.node->outputs
                    ->data[0 /* kOutputActivationState */],
                0);

            return ANEURALNETWORKS_QUANTIZED_16BIT_LSTM;
          };
        }
        if (node->inputs->size == 24 &&
            android_sdk_version < kMinSdkVersionForNNAPI12) {
          // LSTM with layer normalization was introduced in API level 29.
          return nullptr;
        }
        if (weight_type != kTfLiteFloat32 && weight_type != kTfLiteUInt8) {
          return nullptr;
        }
        return [](const NNAPIOpMappingArgs& mapping_args)
                   -> ANeuralNetworksOperationType {
          auto builtin = reinterpret_cast<TfLiteLSTMParams*>(
              mapping_args.node->builtin_data);
          mapping_args.builder->AddScalarInt32Operand(builtin->activation);
          mapping_args.builder->AddScalarFloat32Operand(builtin->cell_clip);
          mapping_args.builder->AddScalarFloat32Operand(builtin->proj_clip);

          // The current NNAPI implementation requires the scratch_buffer as
          // an output.
          mapping_args.builder->AddAdditionalFloat32OutputTensor(2);

          // NNAPI needs both state_in and state_out for cell_state and
          // output_state.
          int ann_index;
          mapping_args.builder->AddStateFloat32Tensor(
              mapping_args.node->inputs
                  ->data[/*kInputActivationStateTensor*/ 18],
              &ann_index);
          mapping_args.model_state_outputs->push_back(ann_index);
          mapping_args.model_state_tfl_inputs->push_back(
              mapping_args.node->inputs
                  ->data[/*kInputActivationStateTensor*/ 18]);
          mapping_args.builder->AddStateFloat32Tensor(
              mapping_args.node->inputs->data[/*kInputCellStateTensor*/ 19],
              &ann_index);
          mapping_args.model_state_outputs->push_back(ann_index);
          mapping_args.model_state_tfl_inputs->push_back(
              mapping_args.node->inputs->data[/*kInputCellStateTensor*/ 19]);

          const bool hybrid_op = IsHybridOperator(
              mapping_args.context, kTfLiteBuiltinLstm, mapping_args.node);

          if (mapping_args.node->inputs->size == 24) {
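            // Add layer normalization tensors if they are provided.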
            for (int i = 20; i < 24; ++i) {
              const auto input_index = mapping_args.node->inputs->data[i];
              if (input_index != kOptionalTensor) {
                mapping_args.builder->AddTensorInput(input_index, hybrid_op);
              } else {
                mapping_args.builder->AddVectorFloat32Operand(nullptr, 0);
              }
            }
          }

          return ANEURALNETWORKS_LSTM;
        };
      }
      break;
    case kTfLiteBuiltinMean:
      // NNAPI does not support generating a scalar as output for MEAN.
      if (version <= 2 &&
          ((android_sdk_version >= kMinSdkVersionForNNAPI11 &&
            context->tensors[node->inputs->data[0]].type == kTfLiteFloat32) ||
           (android_sdk_version >= kMinSdkVersionForNNAPI12 &&
            IsQuantized(context->tensors[node->inputs->data[0]].type))) &&
          context->tensors[node->outputs->data[0]].dims->size > 0) {
        auto input_param = context->tensors[node->inputs->data[0]].params;
        auto output_param = context->tensors[node->outputs->data[0]].params;
        // NNAPI requires that the input and output have the same
        // quantization parameters.
        if (input_param.scale != output_param.scale ||
            input_param.zero_point != output_param.zero_point) {
          return nullptr;
        }
        return [](const NNAPIOpMappingArgs& mapping_args)
                   -> ANeuralNetworksOperationType {
          auto builtin = reinterpret_cast<TfLiteReducerParams*>(
              mapping_args.node->builtin_data);
          int32_t keep_dims = 0;
          if (builtin->keep_dims) keep_dims = 1;
          mapping_args.builder->AddScalarInt32Operand(keep_dims);
          return ANEURALNETWORKS_MEAN;
        };
      }
      break;
    case kTfLiteBuiltinEmbeddingLookup:
      // NNAPI only supports float32 values.
      if (version == 1 &&
          context->tensors[node->inputs->data[1]].type == kTfLiteFloat32) {
        return BasicMappingFn<ANEURALNETWORKS_EMBEDDING_LOOKUP>;
      }
      break;
    case kTfLiteBuiltinHashtableLookup:
      // NNAPI only supports float32 output.
      if (version == 1 &&
          context->tensors[node->outputs->data[0]].type == kTfLiteFloat32) {
        return BasicMappingFn<ANEURALNETWORKS_HASHTABLE_LOOKUP>;
      }
      break;
    case kTfLiteBuiltinMaximum: {
      const auto input_type = context->tensors[node->inputs->data[0]].type;
      if (version <= 2 && android_sdk_version >= kMinSdkVersionForNNAPI12 &&
          (input_type == kTfLiteFloat32 || input_type == kTfLiteUInt8 ||
           input_type == kTfLiteInt8 || input_type == kTfLiteInt32)) {
        return BasicMappingFn<ANEURALNETWORKS_MAXIMUM>;
      }
    } break;
    case kTfLiteBuiltinMinimum: {
      const auto input_type = context->tensors[node->inputs->data[0]].type;
      if (version <= 2 && android_sdk_version >= kMinSdkVersionForNNAPI12 &&
          (input_type == kTfLiteFloat32 || input_type == kTfLiteUInt8 ||
           input_type == kTfLiteInt8 || input_type == kTfLiteInt32)) {
        return BasicMappingFn<ANEURALNETWORKS_MINIMUM>;
      }
    } break;
    case kTfLiteBuiltinCast: {
      const TfLiteType input_type =
          context->tensors[node->inputs->data[0]].type;
      const TfLiteType output_type =
          context->tensors[node->outputs->data[0]].type;
      auto is_supported_tensor_type = [](const TfLiteType& type) {
        return (type == kTfLiteFloat32 || type == kTfLiteInt32 ||
                type == kTfLiteUInt8);
      };
      if (version == 1 && android_sdk_version >= kMinSdkVersionForNNAPI12 &&
          is_supported_tensor_type(input_type) &&
          is_supported_tensor_type(output_type)) {
        return BasicMappingFn<ANEURALNETWORKS_CAST>;
      }
    } break;
    case kTfLiteBuiltinPrelu:
      if (version == 1 && android_sdk_version >= kMinSdkVersionForNNAPI12) {
        if (!IsFloatOrUint8Operator(context, node)) {
          return nullptr;
        }
        return BasicMappingFn<ANEURALNETWORKS_PRELU>;
      }
      break;
    case kTfLiteBuiltinTile: {
      // NNAPI doesn't support int64 or boolean inputs to this op.
      const auto input_type = context->tensors[node->inputs->data[0]].type;
      const auto multipliers_type =
          context->tensors[node->inputs->data[1]].type;
      if (version == 1 && android_sdk_version >= kMinSdkVersionForNNAPI12 &&
          (input_type == kTfLiteFloat32 || input_type == kTfLiteUInt8 ||
           input_type == kTfLiteInt8 || input_type == kTfLiteInt32) &&
          (multipliers_type == kTfLiteInt32)) {
        return BasicMappingFn<ANEURALNETWORKS_TILE>;
      }
    } break;
    case kTfLiteBuiltinLogicalOr: {
      const auto input_type = context->tensors[node->inputs->data[0]].type;
      if (version == 1 && android_sdk_version >= kMinSdkVersionForNNAPI12 &&
          input_type == kTfLiteBool) {
        return BasicMappingFn<ANEURALNETWORKS_LOGICAL_OR>;
      }
    } break;
    case kTfLiteBuiltinLogicalAnd: {
      const auto input_type = context->tensors[node->inputs->data[0]].type;
      if (version == 1 && android_sdk_version >= kMinSdkVersionForNNAPI12 &&
          input_type == kTfLiteBool) {
        return BasicMappingFn<ANEURALNETWORKS_LOGICAL_AND>;
      }
    } break;
    case kTfLiteBuiltinLogicalNot: {
      const auto input_type = context->tensors[node->inputs->data[0]].type;
      if (version == 1 && android_sdk_version >= kMinSdkVersionForNNAPI12 &&
          input_type == kTfLiteBool) {
        return BasicMappingFn<ANEURALNETWORKS_LOGICAL_NOT>;
      }
    } break;
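    // The six comparison ops below (LESS through NOT_EQUAL) share identical
    // NNAPI 1.2 requirements and the same set of accepted input types.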
    case kTfLiteBuiltinLess: {
      const auto input_type = context->tensors[node->inputs->data[0]].type;
      if (version <= 2 && android_sdk_version >= kMinSdkVersionForNNAPI12 &&
          (input_type == kTfLiteFloat32 || input_type == kTfLiteUInt8 ||
           input_type == kTfLiteInt8 || input_type == kTfLiteBool ||
           input_type == kTfLiteInt32)) {
        return BasicMappingFn<ANEURALNETWORKS_LESS>;
      }
    } break;
    case kTfLiteBuiltinLessEqual: {
      const auto input_type = context->tensors[node->inputs->data[0]].type;
      if (version <= 2 && android_sdk_version >= kMinSdkVersionForNNAPI12 &&
          (input_type == kTfLiteFloat32 || input_type == kTfLiteUInt8 ||
           input_type == kTfLiteInt8 || input_type == kTfLiteBool ||
           input_type == kTfLiteInt32)) {
        return BasicMappingFn<ANEURALNETWORKS_LESS_EQUAL>;
      }
    } break;
    case kTfLiteBuiltinGreater: {
      const auto input_type = context->tensors[node->inputs->data[0]].type;
      if (version <= 2 && android_sdk_version >= kMinSdkVersionForNNAPI12 &&
          (input_type == kTfLiteFloat32 || input_type == kTfLiteUInt8 ||
           input_type == kTfLiteInt8 || input_type == kTfLiteBool ||
           input_type == kTfLiteInt32)) {
        return BasicMappingFn<ANEURALNETWORKS_GREATER>;
      }
    } break;
    case kTfLiteBuiltinGreaterEqual: {
      const auto input_type = context->tensors[node->inputs->data[0]].type;
      if (version <= 2 && android_sdk_version >= kMinSdkVersionForNNAPI12 &&
          (input_type == kTfLiteFloat32 || input_type == kTfLiteUInt8 ||
           input_type == kTfLiteInt8 || input_type == kTfLiteBool ||
           input_type == kTfLiteInt32)) {
        return BasicMappingFn<ANEURALNETWORKS_GREATER_EQUAL>;
      }
    } break;
    case kTfLiteBuiltinEqual: {
      const auto input_type = context->tensors[node->inputs->data[0]].type;
      if (version <= 2 && android_sdk_version >= kMinSdkVersionForNNAPI12 &&
          (input_type == kTfLiteFloat32 || input_type == kTfLiteUInt8 ||
           input_type == kTfLiteInt8 || input_type == kTfLiteBool ||
           input_type == kTfLiteInt32)) {
        return BasicMappingFn<ANEURALNETWORKS_EQUAL>;
      }
    } break;
    case kTfLiteBuiltinNotEqual: {
      const auto input_type = context->tensors[node->inputs->data[0]].type;
      if (version <= 2 && android_sdk_version >= kMinSdkVersionForNNAPI12 &&
          (input_type == kTfLiteFloat32 || input_type == kTfLiteUInt8 ||
           input_type == kTfLiteInt8 || input_type == kTfLiteBool ||
           input_type == kTfLiteInt32)) {
        return BasicMappingFn<ANEURALNETWORKS_NOT_EQUAL>;
      }
    } break;
    case kTfLiteBuiltinNeg: {
      const auto input_type = context->tensors[node->inputs->data[0]].type;
      if (version == 1 && android_sdk_version >= kMinSdkVersionForNNAPI12 &&
          (input_type == kTfLiteFloat32 || input_type == kTfLiteInt32)) {
        return BasicMappingFn<ANEURALNETWORKS_NEG>;
      }
    } break;
    case kTfLiteBuiltinTopkV2: {
      if (version <= 2 && android_sdk_version >= kMinSdkVersionForNNAPI12) {
        const auto& input = context->tensors[node->inputs->data[0]];
        const auto& k_param = context->tensors[node->inputs->data[1]];
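        // NNAPI takes k as a scalar operand rather than a tensor, so the
        // TFLite k input must be a compile-time constant (kTfLiteMmapRo)
        // whose value can be read when the op is mapped.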
        if ((input.type == kTfLiteFloat32 || input.type == kTfLiteInt32 ||
             input.type == kTfLiteUInt8 || input.type == kTfLiteInt8) &&
            (k_param.type == kTfLiteInt32 &&
             k_param.allocation_type == kTfLiteMmapRo)) {
          return [](const NNAPIOpMappingArgs& mapping_args)
                     -> ANeuralNetworksOperationType {
            const TfLiteTensor& k_param =
                mapping_args.context
                    ->tensors[mapping_args.node->inputs->data[1]];
            mapping_args.builder->AddScalarInt32Operand(*k_param.data.i32);
            return ANEURALNETWORKS_TOPK_V2;
          };
        } else {
          return nullptr;
        }
      }
    } break;
    case kTfLiteBuiltinSelect: {
      const auto value_type = context->tensors[node->inputs->data[1]].type;
      if (version <= 2 && android_sdk_version >= kMinSdkVersionForNNAPI12 &&
          (value_type == kTfLiteFloat32 || value_type == kTfLiteUInt8 ||
           value_type == kTfLiteInt8 || value_type == kTfLiteInt32)) {
        TfLiteIntArray* condition_shape =
            context->tensors[node->inputs->data[0]].dims;
        TfLiteIntArray* input_shape =
            context->tensors[node->inputs->data[1]].dims;
        // The Android Q-variant of select does not support broadcasting.
        if (!TfLiteIntArrayEqual(condition_shape, input_shape)) {
          return nullptr;
        }
        return BasicMappingFn<ANEURALNETWORKS_SELECT>;
      }
    } break;
    case kTfLiteBuiltinGather: {
      if (version == 1 && android_sdk_version >= kMinSdkVersionForNNAPI12) {
        const auto& input = context->tensors[node->inputs->data[0]];
        const auto& positions = context->tensors[node->inputs->data[1]];

        auto is_supported_input_type = [](const TfLiteTensor& t) {
          return (t.type == kTfLiteFloat32 || t.type == kTfLiteFloat16 ||
                  t.type == kTfLiteInt32 || t.type == kTfLiteUInt8);
        };

        if (!is_supported_input_type(input) ||
            !is_supported_input_type(positions)) {
          return nullptr;
        }

        // NNAPI does not support 0-dimensional (scalar) positions.
        if (positions.dims->size == 0) {
          return nullptr;
        }

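        // NNAPI's GATHER expects its operands as (input, axis, indices),
        // while TFLite keeps axis in the builtin params, so the operands are
        // added explicitly here instead of relying on the default input
        // ordering.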
        return [](const NNAPIOpMappingArgs& mapping_args)
                   -> ANeuralNetworksOperationType {
          auto builtin = reinterpret_cast<TfLiteGatherParams*>(
              mapping_args.node->builtin_data);
          mapping_args.builder->AddTensorInput(
              mapping_args.node->inputs->data[0],
              /* hybrid_op */ false,
              /* scalar_as_tensor */ false);

          mapping_args.builder->AddScalarInt32Operand(builtin->axis);

          mapping_args.builder->AddTensorInput(
              mapping_args.node->inputs->data[1],
              /* hybrid_op */ false,
              /* scalar_as_tensor */ false);

          return ANEURALNETWORKS_GATHER;
        };
      }
    } break;
    case kTfLiteBuiltinBidirectionalSequenceLstm:
      if (version == 1 && android_sdk_version >= kMinSdkVersionForNNAPI12) {
        if (IsHybridOperator(context, builtin_code, node)) {
          // The hybrid version of this op is not supported by NNAPI.
          return nullptr;
        }
        return [](const NNAPIOpMappingArgs& mapping_args)
                   -> ANeuralNetworksOperationType {
          auto builtin =
              reinterpret_cast<TfLiteBidirectionalSequenceLSTMParams*>(
                  mapping_args.node->builtin_data);
          mapping_args.builder->AddScalarInt32Operand(builtin->activation);
          mapping_args.builder->AddScalarFloat32Operand(builtin->cell_clip);
          mapping_args.builder->AddScalarFloat32Operand(builtin->proj_clip);
          mapping_args.builder->AddScalarBoolOperand(builtin->merge_outputs);
          mapping_args.builder->AddScalarBoolOperand(builtin->time_major);
          // TFLite doesn't support layer normalization in bidirectional
          // sequence LSTM, so we insert eight optional (empty) tensors for
          // the NNAPI layer normalization weights.
          for (int i = 0; i < 8; ++i) {
            mapping_args.builder->AddVectorFloat32Operand(nullptr, 0);
          }
          return ANEURALNETWORKS_BIDIRECTIONAL_SEQUENCE_LSTM;
        };
      }
      break;
    case kTfLiteBuiltinExpandDims: {
      const auto input_type = context->tensors[node->inputs->data[0]].type;
      const auto& axis = context->tensors[node->inputs->data[1]];
      if (version == 1 && android_sdk_version >= kMinSdkVersionForNNAPI12 &&
          (input_type == kTfLiteFloat16 || input_type == kTfLiteFloat32 ||
           input_type == kTfLiteInt32 || input_type == kTfLiteUInt8 ||
           input_type == kTfLiteInt8) &&
          // TFLite also allows an int64 axis, but NNAPI accepts only int32.
          (axis.type == kTfLiteInt32 &&
           axis.allocation_type == kTfLiteMmapRo)) {
        return [](const NNAPIOpMappingArgs& mapping_args)
                   -> ANeuralNetworksOperationType {
          const TfLiteTensor& axis_param =
              mapping_args.context->tensors[mapping_args.node->inputs->data[1]];
          mapping_args.builder->AddScalarInt32Operand(*axis_param.data.i32);
          return ANEURALNETWORKS_EXPAND_DIMS;
        };
      }
    } break;
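    // As with TOPK_V2 above, the split axis must be a constant so its value
    // can be folded into a scalar operand for NNAPI.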
    case kTfLiteBuiltinSplit: {
      // Tensor indices: split_dim: 0, value: 1
      const TfLiteTensor& axis = context->tensors[node->inputs->data[0]];
      const TfLiteTensor& input = context->tensors[node->inputs->data[1]];
      if (version == 1 && android_sdk_version >= kMinSdkVersionForNNAPI12 &&
          (input.type == kTfLiteFloat32 || input.type == kTfLiteUInt8 ||
           input.type == kTfLiteInt32) &&
          (axis.type == kTfLiteInt32 &&
           axis.allocation_type == kTfLiteMmapRo)) {
        return [](const NNAPIOpMappingArgs& mapping_args)
                   -> ANeuralNetworksOperationType {
          const TfLiteTensor& axis =
              mapping_args.context->tensors[mapping_args.node->inputs->data[0]];
          auto builtin = reinterpret_cast<TfLiteSplitParams*>(
              mapping_args.node->builtin_data);
          mapping_args.builder->AddScalarInt32Operand(*axis.data.i32);
          mapping_args.builder->AddScalarInt32Operand(builtin->num_splits);
          return ANEURALNETWORKS_SPLIT;
        };
      }
    } break;
    case kTfLiteBuiltinLogSoftmax: {
      const auto input_type = context->tensors[node->inputs->data[0]].type;
      if (version == 1 && android_sdk_version >= kMinSdkVersionForNNAPI12 &&
          input_type == kTfLiteFloat32) {
        return [](const NNAPIOpMappingArgs& mapping_args)
                   -> ANeuralNetworksOperationType {
          // Scaling (beta) and axis are hardcoded to 1 and -1, respectively,
          // in TFLite.
          mapping_args.builder->AddScalarFloat32Operand(1);
          mapping_args.builder->AddScalarInt32Operand(-1);
          return ANEURALNETWORKS_LOG_SOFTMAX;
        };
      }
    } break;
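    // Only float32 -> uint8 asymmetric quantization is mapped; NNAPI 1.2's
    // QUANTIZE produces quant8 asymmetric output, and a non-positive scale
    // would be invalid.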
    case kTfLiteBuiltinQuantize: {
      const auto value_type = context->tensors[node->inputs->data[0]].type;
      const auto output_type = context->tensors[node->outputs->data[0]].type;
      const auto quantization_params =
          context->tensors[node->outputs->data[0]].params;
      if (version == 1 && android_sdk_version >= kMinSdkVersionForNNAPI12 &&
          value_type == kTfLiteFloat32 && output_type == kTfLiteUInt8 &&
          quantization_params.scale > 0.f) {
        return BasicMappingFn<ANEURALNETWORKS_QUANTIZE>;
      }
    } break;
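    // The REDUCE_* mappings below all forward keep_dims as a bool scalar;
    // REDUCE_PROD and REDUCE_SUM are additionally restricted to float32
    // inputs, and none of them may produce a scalar (rank-0) output.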
    case kTfLiteBuiltinReduceAny: {
      if (version != 1 || android_sdk_version < kMinSdkVersionForNNAPI12) {
        return nullptr;
      }
      // NNAPI does not support generating a scalar as output for REDUCE_ANY.
      if (context->tensors[node->outputs->data[0]].dims->size == 0) {
        return nullptr;
      }
      return [](const NNAPIOpMappingArgs& mapping_args)
                 -> ANeuralNetworksOperationType {
        auto builtin = reinterpret_cast<TfLiteReducerParams*>(
            mapping_args.node->builtin_data);
        mapping_args.builder->AddScalarBoolOperand(builtin->keep_dims);
        return ANEURALNETWORKS_REDUCE_ANY;
      };
    } break;
    case kTfLiteBuiltinReduceMin: {
      if (version > 2 || android_sdk_version < kMinSdkVersionForNNAPI12) {
        return nullptr;
      }
      // NNAPI does not support generating a scalar as output for REDUCE_MIN.
      if (context->tensors[node->outputs->data[0]].dims->size == 0) {
        return nullptr;
      }
      return [](const NNAPIOpMappingArgs& mapping_args)
                 -> ANeuralNetworksOperationType {
        auto builtin = reinterpret_cast<TfLiteReducerParams*>(
            mapping_args.node->builtin_data);
        mapping_args.builder->AddScalarBoolOperand(builtin->keep_dims);
        return ANEURALNETWORKS_REDUCE_MIN;
      };
    } break;
    case kTfLiteBuiltinReduceMax: {
      if (version > 2 || android_sdk_version < kMinSdkVersionForNNAPI12) {
        return nullptr;
      }
      // NNAPI does not support generating a scalar as output for REDUCE_MAX.
      if (context->tensors[node->outputs->data[0]].dims->size == 0) {
        return nullptr;
      }
      return [](const NNAPIOpMappingArgs& mapping_args)
                 -> ANeuralNetworksOperationType {
        auto builtin = reinterpret_cast<TfLiteReducerParams*>(
            mapping_args.node->builtin_data);
        mapping_args.builder->AddScalarBoolOperand(builtin->keep_dims);
        return ANEURALNETWORKS_REDUCE_MAX;
      };
    } break;
    case kTfLiteBuiltinReduceProd: {
      if (version != 1 || android_sdk_version < kMinSdkVersionForNNAPI12) {
        return nullptr;
      }
      // NNAPI only supports floating point REDUCE_PROD.
      const auto input_type = context->tensors[node->inputs->data[0]].type;
      if (input_type != kTfLiteFloat32) {
        return nullptr;
      }
      // NNAPI does not support generating a scalar as output for REDUCE_PROD.
      if (context->tensors[node->outputs->data[0]].dims->size == 0) {
        return nullptr;
      }
      return [](const NNAPIOpMappingArgs& mapping_args)
                 -> ANeuralNetworksOperationType {
        auto builtin = reinterpret_cast<TfLiteReducerParams*>(
            mapping_args.node->builtin_data);
        mapping_args.builder->AddScalarBoolOperand(builtin->keep_dims);
        return ANEURALNETWORKS_REDUCE_PROD;
      };
    } break;
    case kTfLiteBuiltinSum: {
      if (version != 1 || android_sdk_version < kMinSdkVersionForNNAPI12) {
        return nullptr;
      }
      // NNAPI only supports floating point REDUCE_SUM.
      const auto input_type = context->tensors[node->inputs->data[0]].type;
      if (input_type != kTfLiteFloat32) {
        return nullptr;
      }
      // NNAPI does not support generating a scalar as output for REDUCE_SUM.
      if (context->tensors[node->outputs->data[0]].dims->size == 0) {
        return nullptr;
      }
      return [](const NNAPIOpMappingArgs& mapping_args)
                 -> ANeuralNetworksOperationType {
        auto builtin = reinterpret_cast<TfLiteReducerParams*>(
            mapping_args.node->builtin_data);
        mapping_args.builder->AddScalarBoolOperand(builtin->keep_dims);
        return ANEURALNETWORKS_REDUCE_SUM;
      };
    } break;
    default:
      // All other operators are not mapped.
      return nullptr;
  }
  return nullptr;
}
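
// Illustrative use of the returned mapping function (a sketch, not the actual
// caller elsewhere in this file): given a non-null MappingFn `fn` and a
// populated NNAPIOpMappingArgs `args`, the call
//
//   ANeuralNetworksOperationType nn_op_type = fn(args);
//
// appends the op's extra scalar/tensor operands through args.builder and
// returns the NNAPI operation type to register with the model.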