void DataOps::populate_op_mode_supported()

in onnxruntime/core/providers/openvino/ov_versions/data_ops.cc [212:1009]


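This function fills the three capability tables the OpenVINO EP consults when deciding which nodes to claim. no_dimension_supported_ and subgraph_supported_ are flat lists of {op_type, version, device list} entries; op_list_ maps an op type to an UnsupportedOpMode, i.e. a set of EP versions plus a predicate that returns true when the node must be rejected. The repeated inserts for the same key (e.g. three "Conv" entries) imply op_list_ is a multimap. A minimal sketch of the presumed entry types (field names are inferred from the brace-init lists below, not copied from data_ops.h):

    struct SupportedOp {                      // shape of no_dimension_supported_ / subgraph_supported_ entries
      std::string optype;                     // ONNX op type, e.g. "Unsqueeze"
      int version;                            // EP version id, e.g. V_2020_4
      std::vector<std::string> device_type;   // "All", "CPU", "GPU", or "MYRIAD"
    };

    struct UnsupportedOpMode {                // mapped type of op_list_
      std::set<int> ver;                      // versions the predicate applies to
      std::function<bool(const Node*, const InitializedTensorSet&)> func;  // true => reject node
    };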
void DataOps::populate_op_mode_supported() {
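  // populate no_dimension_supported_; each entry is {op_type, version, device list}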
  no_dimension_supported_.push_back({"Unsqueeze", V_2020_4, {"All"}});
  no_dimension_supported_.push_back({"Squeeze", V_2020_4, {"All"}});
  no_dimension_supported_.push_back({"Cast", V_2020_4, {"All"}});
  no_dimension_supported_.push_back({"Gather", V_2020_4, {"All"}});
  no_dimension_supported_.push_back({"Mul", V_2020_4, {"All"}});
  no_dimension_supported_.push_back({"Sub", V_2020_4, {"All"}});
  no_dimension_supported_.push_back({"Min", V_2020_4, {"All"}});
  no_dimension_supported_.push_back({"Div", V_2020_4, {"All"}});
  no_dimension_supported_.push_back({"Floor", V_2020_4, {"All"}});
  no_dimension_supported_.push_back({"Where", V_2021_2, {"All"}});
  no_dimension_supported_.push_back({"Range", V_2021_2, {"All"}});
  no_dimension_supported_.push_back({"ArgMin", V_2021_2, {"MYRIAD"}});
  no_dimension_supported_.push_back({"Max", V_2021_2, {"MYRIAD"}});
  no_dimension_supported_.push_back({"Add", V_2021_2, {"MYRIAD"}});
  no_dimension_supported_.push_back({"Less", V_2021_2, {"MYRIAD"}});
  no_dimension_supported_.push_back({"Greater", V_2021_2, {"MYRIAD"}});
  no_dimension_supported_.push_back({"Clip", V_2021_2, {"MYRIAD"}});
  no_dimension_supported_.push_back({"Resize", V_2021_2, {"MYRIAD"}});
  no_dimension_supported_.push_back({"Equal", V_2021_2, {"MYRIAD"}});
  no_dimension_supported_.push_back({"Reshape", V_2021_3, {"MYRIAD"}});
  no_dimension_supported_.push_back({"Ceil", V_2021_3, {"MYRIAD"}});
  no_dimension_supported_.push_back({"Ceil", V_2021_4, {"All"}});
  no_dimension_supported_.push_back({"Loop", V_2021_3, {"MYRIAD"}});
  no_dimension_supported_.push_back({"Loop", V_2021_4, {"All"}});
  no_dimension_supported_.push_back({"ReduceMin", V_2021_3, {"MYRIAD"}});
  no_dimension_supported_.push_back({"ReduceMin", V_2021_4, {"All"}});
  no_dimension_supported_.push_back({"ReduceMax", V_2021_4, {"All"}});
  no_dimension_supported_.push_back({"QuantizeLinear", V_2021_4, {"All"}});
  no_dimension_supported_.push_back({"DequantizeLinear", V_2021_4, {"All"}});
  

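  // populate subgraph_supported_; each entry is {op_type, version, device list}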
  subgraph_supported_.push_back({"Mul", V_2020_4, {"All"}});
  subgraph_supported_.push_back({"Transpose", V_2020_4, {"All"}});
  subgraph_supported_.push_back({"Unsqueeze", V_2020_4, {"All"}});
  subgraph_supported_.push_back({"Cast", V_2020_4, {"All"}});
  subgraph_supported_.push_back({"Concat", V_2020_4, {"All"}});
  subgraph_supported_.push_back({"Gather", V_2020_4, {"All"}});
  subgraph_supported_.push_back({"Div", V_2020_4, {"MYRIAD"}});
  subgraph_supported_.push_back({"Sub", V_2020_4, {"MYRIAD"}});
  subgraph_supported_.push_back({"Identity", V_2021_1, {"CPU"}});
  subgraph_supported_.push_back({"Div", V_2021_1, {"CPU"}});
  subgraph_supported_.push_back({"Sub", V_2021_1, {"CPU"}});

  // populate op_list_ (unsupported op modes): for each op type, the versions a check
  // applies to and a predicate that returns true when the node must be rejected
  {
    UnsupportedOpMode obj = {{V_2020_4, V_2021_1, V_2021_2, V_2021_3, V_2021_4},
                             [this](const Node* node, const InitializedTensorSet&) {
                               for (size_t i = 0; i < node->InputDefs().size(); i++) {
                                 if (node->InputDefs()[i]->TypeAsProto()->tensor_type().elem_type() != ONNX_NAMESPACE::TensorProto_DataType::TensorProto_DataType_FLOAT)
                                   return true;
                               }
                               return false;
                             }};
    op_list_.insert({"Abs", obj});
  }
  {
    UnsupportedOpMode obj = {{V_2020_4, V_2021_1, V_2021_2, V_2021_3, V_2021_4},
                             [this](const Node* node, const InitializedTensorSet&) {
                               //select_last_index = 1 is not supported
                               auto& attributes = node->GetAttributes();
                               auto last_index_arg = attributes.count("select_last_index") > 0 ? attributes.at("select_last_index").i() : 0;
                               if (last_index_arg != 0)
                                 return true;
                               // only float input is supported for ArgMax and ArgMin
                               if (node->InputDefs()[0]->TypeAsProto()->tensor_type().elem_type() != ONNX_NAMESPACE::TensorProto_DataType::TensorProto_DataType_FLOAT)
                                 return true;
                               return false;
                             }};
    op_list_.insert({"ArgMax", obj});
    op_list_.insert({"ArgMin", obj});
  }
  {
    UnsupportedOpMode obj = {{V_2020_4},
                             [this](const Node* node, const InitializedTensorSet&) {
                               // ceil_mode attribute is not supported in nGraph
                               const auto& attributes = node->GetAttributes();
                               auto ceil_attr = attributes.find("ceil_mode");
                               // default value of ceil_mode (0) is supported.
                               if (ceil_attr != attributes.end() && ceil_attr->second().i() != 0) return true;
                               return (!dimension_unsupported(node));
                             }};
    op_list_.insert({"AveragePool", obj});
  }
  {
    UnsupportedOpMode obj = {{V_2021_1, V_2021_2, V_2021_3, V_2021_4},
                             [this](const Node* node, const InitializedTensorSet&) {
                               //an empty auto_pad value is not supported
                               const auto& attributes = node->GetAttributes();
                               auto auto_attr = attributes.find("auto_pad");
                               if (auto_attr == attributes.end() || auto_attr->second().s() == "") {
                                 return true;
                               }
                               // default value of ceil_mode (0) is supported.
                               auto ceil_attr = attributes.find("ceil_mode");
                               if (ceil_attr != attributes.end() && ceil_attr->second().i() != 0) return true;
                               return (!dimension_unsupported(node));
                             }};
    op_list_.insert({"AveragePool", obj});
  }
  {
    UnsupportedOpMode obj = {{V_2020_4, V_2021_1, V_2021_2, V_2021_3, V_2021_4},
                             [this](const Node* node, const InitializedTensorSet&) {
                               //Only float16, float, and double data types are supported
                               const bool data_is_float = node->InputDefs()[0]->Type()->find("float") != std::string::npos;
                               const bool data_is_float16 = node->InputDefs()[0]->Type()->find("float16") != std::string::npos;
                               const bool data_is_double = node->InputDefs()[0]->Type()->find("double") != std::string::npos;
                               return !(data_is_float || data_is_float16 || data_is_double);
                             }};
    op_list_.insert({"Clip", obj});
  }
  {
    UnsupportedOpMode obj = {{V_2020_4, V_2021_1, V_2021_2, V_2021_3},
                             [this](const Node* node, const InitializedTensorSet& initializers) {
                               if (GetInputCount(node, initializers) > 1)
                                 return true;
                               return false;
                             }};
    op_list_.insert({"Conv", obj});
  }
  {
    UnsupportedOpMode obj = {{V_2021_4},
                             [this](const Node* node, const InitializedTensorSet&) {
                               const auto& attributes = node->GetAttributes();
                               auto conv_filter = attributes.find("kernel_shape");
                               if (device_id_.find("MYRIAD") != std::string::npos) {
                                 //If the kernel size is not 2D, the op is rejected in case of MYRIAD
                                 if (conv_filter != attributes.end() && conv_filter->second().ints().size() != 2) {
                                   return true;
                                 }
                               }
                               //If the device is GPU
                               if (device_id_.find("GPU") != std::string::npos) {
                                 if (conv_filter != attributes.end()) {
                                   auto& ints = conv_filter->second().ints();
                                   //check if the input for the op has a bias
                                   bool if_bias = false;
                                   if (node->InputDefs().size() > 2 && node->InputDefs()[2]->Name() == "B")
                                     if_bias = true;
                                   //If the kernel size is 1D, the input has a bias, and the output precision is FP32, the op is rejected
                                   if (ints.size() == 1 && if_bias) {
                                     auto output_data_type = node->OutputDefs()[0]->TypeAsProto()->tensor_type().elem_type();
                                     if (output_data_type == ONNX_NAMESPACE::TensorProto_DataType::TensorProto_DataType_FLOAT)
                                       return true;
                                   }
                                   //If the kernel size is 3D and the input does not have a bias, the op is rejected in case of GPU
                                   if (ints.size() == 3 && !if_bias)
                                     return true;
                                 }
                               }
                               return false;
                             }};
    op_list_.insert({"Conv", obj});
  }
  {
    UnsupportedOpMode obj = {{V_2021_1, V_2021_2, V_2021_3, V_2021_4},
                             [this](const Node* node, const InitializedTensorSet&) {
                               auto& attributes = node->GetAttributes();
                               if (attributes.count("auto_pad") == 0 || attributes.at("auto_pad").s() == "") {
                                 return true;
                               }
                               return false;
                             }};
    op_list_.insert({"Conv", obj});
  }
  {
    UnsupportedOpMode obj = {{V_2020_4, V_2021_1, V_2021_2, V_2021_3},
                             [this](const Node* node, const InitializedTensorSet& initializers) {
                               if (GetInputCount(node, initializers) > 1)
                                 return true;
                               return false;
                             }};
    op_list_.insert({"ConvTranspose", obj});
  }
  {
    UnsupportedOpMode obj = {{V_2021_4},
                             [this](const Node* node, const InitializedTensorSet& initializers) {
                               if (device_id_.find("MYRIAD") != std::string::npos) {
                                 if (GetInputCount(node, initializers) > 1)
                                  return true;
                               }
                               bool if_bias = false;
                               const auto& attributes = node->GetAttributes();
                               auto out_shape_attr = attributes.find("output_shape");

                               // If the device is GPU
                               if (device_id_.find("GPU") != std::string::npos) {
                                 auto conv_filter = attributes.find("kernel_shape");
                                 if (conv_filter != attributes.end()) {
                                   auto& kernel_size = conv_filter->second().ints();

                                   //If 3D convolution, reject the op
                                   if(kernel_size.size() == 3)
                                     return true;
                                   //In 1D conv, if the pads are asymmetric, then the op is rejected
                                   if(kernel_size.size() == 1) {
                                     if (attributes.count("pads") > 0) {
                                     auto& pads_attr = attributes.at("pads");
                                     auto int_size = pads_attr.ints_size();
                                     if (int_size > 1 && (pads_attr.ints(0) != pads_attr.ints(1)))
                                       return true;
                                     }
                                   }
                                   //check if the Input for the op has bias
                                   if(node->InputDefs().size() > 2) {
                                     if(node->InputDefs()[2]->Name() == "B")
                                       if_bias = true;
                                   }
                                   //If the kernel size is 2D, the input has no bias, the padding is 0 and the op has dilations, the op is rejected
                                   if(kernel_size.size() == 2 && !if_bias) {
                                     if (attributes.count("pads") > 0) {
                                     auto& pads_attr = attributes.at("pads");
                                     auto int_size = pads_attr.ints_size();
                                     // comparing if all the 4 values in the padding are equal to 1
                                     if (int_size == 4 && (pads_attr.ints(0) == pads_attr.ints(1) == pads_attr.ints(2) == pads_attr.ints(3))) {
                                       if(pads_attr.ints(0) == 1)
                                           return false;
                                     }
                                     // If the op has dilations as an attribute after the above checks, then the op is rejected.
                                     auto dilation_attr = attributes.find("dilations");
                                     if (dilation_attr != attributes.end())
                                       return true;
                                     }
                                   }
                                  }
                                 }
                                 return false;
                             }};
    op_list_.insert({"ConvTranspose", obj});
  }
  {
    UnsupportedOpMode obj = {{V_2021_1, V_2021_2, V_2021_3, V_2021_4},
                             [this](const Node* node, const InitializedTensorSet&) {
                               auto& attributes = node->GetAttributes();
                               if (attributes.count("auto_pad") == 0 || attributes.at("auto_pad").s() == "") {
                                 return true;
                               }
                               return false;
                             }};
    op_list_.insert({"ConvTranspose", obj});
  }
  {
    UnsupportedOpMode obj = {{V_2021_2},
                             [this](const Node* node, const InitializedTensorSet&) {
                               if (device_id_.find("MYRIAD") != std::string::npos) {
                                 const auto& input_arg = node->InputDefs()[0];
                                 auto shape = input_arg->Shape();
                                 if ((shape != nullptr) && (shape->dim(0).value_case() != shape->dim(0).kDimValue)) {
                                   return true;
                                 }
                               }
                               return false;
                             }};
    op_list_.insert({"ConvTranspose", obj});
  }
  {
    UnsupportedOpMode obj = {{V_2020_4, V_2021_1, V_2021_2, V_2021_3, V_2021_4},
                             [this](const Node* node, const InitializedTensorSet& initializers) {
                               // all ConvInteger zero points need to be constants
                               if (node->InputDefs().size() == 3) {
                                 return (initializers.find(node->InputDefs()[2]->Name()) == initializers.end());
                               } else if (node->InputDefs().size() == 4) {
                                 return initializers.find(node->InputDefs()[2]->Name()) == initializers.end() ||
                                        initializers.find(node->InputDefs()[3]->Name()) == initializers.end();
                               }
                               return false;
                             }};
    op_list_.insert({"ConvInteger", obj});
  }
  {
    UnsupportedOpMode obj = {{V_2020_4, V_2021_1},
                             [this](const Node* node, const InitializedTensorSet&) {
                               using onnx_dtype = ONNX_NAMESPACE::TensorProto_DataType;
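                               // each entry is {output_type, input_0_type, input_1_type}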
                               auto supportedOps = std::set<std::vector<onnx_dtype>>{
                                   {onnx_dtype::TensorProto_DataType_FLOAT, onnx_dtype::TensorProto_DataType_FLOAT, onnx_dtype::TensorProto_DataType_FLOAT},
                                   {onnx_dtype::TensorProto_DataType_FLOAT, onnx_dtype::TensorProto_DataType_INT8, onnx_dtype::TensorProto_DataType_FLOAT},
                                   {onnx_dtype::TensorProto_DataType_FLOAT, onnx_dtype::TensorProto_DataType_FLOAT, onnx_dtype::TensorProto_DataType_INT8},
                                   {onnx_dtype::TensorProto_DataType_FLOAT, onnx_dtype::TensorProto_DataType_UINT8, onnx_dtype::TensorProto_DataType_FLOAT},
                                   {onnx_dtype::TensorProto_DataType_FLOAT, onnx_dtype::TensorProto_DataType_FLOAT, onnx_dtype::TensorProto_DataType_UINT8},
                                   {onnx_dtype::TensorProto_DataType_INT8, onnx_dtype::TensorProto_DataType_INT8, onnx_dtype::TensorProto_DataType_INT8},
                                   {onnx_dtype::TensorProto_DataType_INT8, onnx_dtype::TensorProto_DataType_INT8, onnx_dtype::TensorProto_DataType_UINT8},
                                   {onnx_dtype::TensorProto_DataType_INT8, onnx_dtype::TensorProto_DataType_UINT8, onnx_dtype::TensorProto_DataType_INT8},
                                   {onnx_dtype::TensorProto_DataType_INT32, onnx_dtype::TensorProto_DataType_INT32, onnx_dtype::TensorProto_DataType_INT32}};

                               if (node->OpType() == "Equal") {
                                 supportedOps.insert({onnx_dtype::TensorProto_DataType_UINT8, onnx_dtype::TensorProto_DataType_INT32, onnx_dtype::TensorProto_DataType_INT32});
                                 supportedOps.insert({onnx_dtype::TensorProto_DataType_UINT8, onnx_dtype::TensorProto_DataType_FLOAT, onnx_dtype::TensorProto_DataType_FLOAT});
                               }

                               onnx_dtype input_0_data_type = (ONNX_NAMESPACE::TensorProto_DataType)node->InputDefs()[0]->TypeAsProto()->tensor_type().elem_type();
                               onnx_dtype input_1_data_type = (ONNX_NAMESPACE::TensorProto_DataType)node->InputDefs()[1]->TypeAsProto()->tensor_type().elem_type();
                               onnx_dtype output_data_type = (ONNX_NAMESPACE::TensorProto_DataType)node->OutputDefs()[0]->TypeAsProto()->tensor_type().elem_type();
                               const std::vector<onnx_dtype> type_triple{output_data_type, input_0_data_type, input_1_data_type};
                               return supportedOps.find(type_triple) == supportedOps.end();
                             }};
    op_list_.insert({"Equal", obj});
    op_list_.insert({"And", obj});
  }
  {
    UnsupportedOpMode obj = {{V_2021_1, V_2021_2, V_2021_3},
                             [this](const Node* node, const InitializedTensorSet&) {
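                               //on GPU, Gather is rejected when its data input is a graph input and the indices are INT64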
                               if (device_id_.find("GPU") != std::string::npos) {
                                 const auto& input = node->InputDefs()[0];
                                 auto graph_inputs = graph_viewer_.GetInputs();
                                 auto it = find(graph_inputs.begin(), graph_inputs.end(), input);
                                 if (it != graph_inputs.end()) {
                                   const auto& indices_arg = node->InputDefs()[1];
                                   if (indices_arg->TypeAsProto()->tensor_type().elem_type() == ONNX_NAMESPACE::TensorProto_DataType::TensorProto_DataType_INT64)
                                     return true;
                                 }
                               }
                               return false;
                             }};
    op_list_.insert({"Gather", obj});
  }
  {
    UnsupportedOpMode obj = {{V_2021_4},
                             [this](const Node* node, const InitializedTensorSet&) {
                               if (device_id_.find("GPU") != std::string::npos) {
                                 const auto& input = node->InputDefs()[0];
                                 auto graph_inputs = graph_viewer_.GetInputs();
                                 auto it = find(graph_inputs.begin(), graph_inputs.end(), input);
                                 if (it != graph_inputs.end()) {
                                   const auto& indices_arg = node->InputDefs()[1];
                                   if (indices_arg->TypeAsProto()->tensor_type().elem_type() == ONNX_NAMESPACE::TensorProto_DataType::TensorProto_DataType_INT64)
                                     return true;
                                 }
                                 auto output_data_type = node->OutputDefs()[0]->TypeAsProto()->tensor_type().elem_type();
                                 //If the output of Gather op is INT8 or UINT8, it is rejected for GPU.
                                 if (output_data_type == ONNX_NAMESPACE::TensorProto_DataType::TensorProto_DataType_INT8 ||
                                    output_data_type == ONNX_NAMESPACE::TensorProto_DataType::TensorProto_DataType_UINT8)
                                   return true;
                               }
                               return false;
                             }};
    op_list_.insert({"Gather", obj});
  }
  {
    UnsupportedOpMode obj = {{V_2021_3, V_2021_4},
                             [this](const Node* node, const InitializedTensorSet&) {
                               const auto& indices_arg = node->InputDefs()[0];
                               const auto& output_arg = node->OutputDefs()[0];
                               if (indices_arg->TypeAsProto()->tensor_type().elem_type() != output_arg->TypeAsProto()->tensor_type().elem_type())
                                 return true;
                               if ((indices_arg->TypeAsProto()->tensor_type().elem_type() == ONNX_NAMESPACE::TensorProto_DataType::TensorProto_DataType_FLOAT16) ||
                                   (indices_arg->TypeAsProto()->tensor_type().elem_type() == ONNX_NAMESPACE::TensorProto_DataType::TensorProto_DataType_FLOAT)) {
                                 return false;
                               }
                               return true;
                             }};
    op_list_.insert({"GatherElements", obj});
  }
  {
    UnsupportedOpMode obj = {{V_2020_4, V_2021_1, V_2021_2, V_2021_3, V_2021_4},
                             [this](const Node* node, const InitializedTensorSet&) {
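                               //Identity that directly bridges a graph input to a graph output is rejected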
                               const auto& input = node->InputDefs()[0];
                               const auto& output = node->OutputDefs()[0];
                               auto graph_inputs = this->graph_viewer_.GetInputs();
                               auto graph_outputs = this->graph_viewer_.GetOutputs();
                               auto input_it = find(graph_inputs.begin(), graph_inputs.end(), input);
                               auto output_it = find(graph_outputs.begin(), graph_outputs.end(), output);
                               if (input_it != graph_inputs.end() && output_it != graph_outputs.end())
                                 return true;
                               return false;
                             }};
    op_list_.insert({"Identity", obj});
  }
  {
    UnsupportedOpMode obj = {{V_2021_3, V_2021_4},
                             [this](const Node* node, const InitializedTensorSet& initializers) {
                               //the Loop condition input (input 1) has to be an initializer
                               const auto& cond = node->InputDefs()[1];
                               return (initializers.find(cond->Name()) == initializers.end());
                             }};
    op_list_.insert({"Loop", obj});
  }
  {
    UnsupportedOpMode obj = {{V_2021_3, V_2021_4},
                             [this](const Node* node, const InitializedTensorSet&) {
                               //MaxPool "indices" output is not currently supported.
                               //if (node->OutputDefs().size() > 1)
                               //  return true;
                               const auto& attributes = node->GetAttributes();
                               /* default value of ceil_mode (0) is supported.
                               auto ceil_attr = attributes.find("ceil_mode");
                               if (ceil_attr != attributes.end() && ceil_attr->second().i() != 0)
                                 return true; */
                               //auto pad null value is not supported
                               auto auto_attr = attributes.find("auto_pad");
                               if (auto_attr == attributes.end() || auto_attr->second().s() == "")
                                 return true;
                               // dilations attrs are not supported in nGraph
                               if (attributes.find("dilations") != attributes.end())
                                 return true;
                               // uint8 as output type for maxpool op is not supported on GPU
                               if (device_id_.find("GPU") != std::string::npos) {
                                 auto output_data_type = node->OutputDefs()[0]->TypeAsProto()->tensor_type().elem_type();
                                 if (output_data_type == ONNX_NAMESPACE::TensorProto_DataType::TensorProto_DataType_UINT8)
                                  return true;
                               }
                               return (!this->dimension_unsupported(node));
                             }};
    op_list_.insert({"MaxPool", obj});
  }
  {
    UnsupportedOpMode obj = {{V_2020_4, V_2021_1, V_2021_2},
                             [this](const Node* node, const InitializedTensorSet&) {
                               //MaxPool "indices" output is not currently supported.
                               if (node->OutputDefs().size() > 1)
                                 return true;
                               const auto& attributes = node->GetAttributes();
                               // default value of ceil_mode (0) is supported.
                               auto ceil_attr = attributes.find("ceil_mode");
                               if (ceil_attr != attributes.end() && ceil_attr->second().i() != 0)
                                 return true;
                               //auto pad null value is not supported
                               auto auto_attr = attributes.find("auto_pad");
                               if (auto_attr == attributes.end() || auto_attr->second().s() == "")
                                 return true;
                               // dilations attrs are not supported in nGraph
                               if (attributes.find("dilations") != attributes.end())
                                 return true;
                               return (!this->dimension_unsupported(node));
                             }};
    op_list_.insert({"MaxPool", obj});
  }
  {
    UnsupportedOpMode obj = {{V_2021_2, V_2021_3, V_2021_4},
                             [this](const Node* node, const InitializedTensorSet& initializers) {
                               if (device_id_.find("MYRIAD") == std::string::npos) {
                                 if (GetInputCount(node, initializers) == 1)
                                   return true;
                               }
                               return false;
                             }};
    op_list_.insert({"Max", obj});
    op_list_.insert({"Min", obj});
    op_list_.insert({"Mean", obj});
    op_list_.insert({"Sum", obj});
  }
  {
    UnsupportedOpMode obj = {{V_2020_4, V_2021_1},
                             [this](const Node* node, const InitializedTensorSet& initializers) {
                               if (GetInputCount(node, initializers) == 1)
                                 return true;
                               return false;
                             }};
    op_list_.insert({"Mean", obj});
    op_list_.insert({"Sum", obj});
  }
  {
    UnsupportedOpMode obj = {{V_2020_4, V_2021_1},
                             [this](const Node* node, const InitializedTensorSet& initializers) {
                               if (GetInputCount(node, initializers) == 1)
                                 return true;
                               for (size_t i = 0; i < node->InputDefs().size(); i++) {
                                 auto dtype = node->InputDefs()[i]->TypeAsProto()->tensor_type().elem_type();
                                 if (dtype == ONNX_NAMESPACE::TensorProto_DataType::TensorProto_DataType_UINT8 ||
                                     dtype == ONNX_NAMESPACE::TensorProto_DataType::TensorProto_DataType_INT16)
                                   return true;
                               }
                               return false;
                             }};
    op_list_.insert({"Max", obj});
    op_list_.insert({"Min", obj});
  }
  {
    UnsupportedOpMode obj = {{V_2020_4, V_2021_1, V_2021_2, V_2021_3, V_2021_4},
                             [this](const Node* node, const InitializedTensorSet&) {
                               //all MatMul data types other than float show computation mismatches
                               const bool A_is_float = node->InputDefs()[0]->Type()->find("float") != std::string::npos;
                               const bool B_is_float = node->InputDefs()[1]->Type()->find("float") != std::string::npos;
                               return (A_is_float && B_is_float) ? false : true;
                             }};
    op_list_.insert({"MatMul", obj});
  }
  {
    UnsupportedOpMode obj = {{V_2020_4, V_2021_1, V_2021_2, V_2021_3, V_2021_4},
                             [this](const Node* node, const InitializedTensorSet& initializers) {
                               // all MatMulInteger zero points need to be constants
                               if (node->InputDefs().size() == 3) {
                                 // not found in initializers -> not const
                                 return initializers.find(node->InputDefs()[2]->Name()) == initializers.end();
                               } else if (node->InputDefs().size() == 4) {
                                 // not found in initializers -> not const
                                 return ((initializers.find(node->InputDefs()[2]->Name()) == initializers.end()) ||
                                         (initializers.find(node->InputDefs()[3]->Name()) == initializers.end()));
                               }
                               return false;
                             }};
    op_list_.insert({"MatMulInteger", obj});
  }
  {
    UnsupportedOpMode obj = {{V_2020_4, V_2021_2},
                             [this](const Node* node, const InitializedTensorSet&) {
                               //only fmod = 1 is supported
                               auto& attributes = node->GetAttributes();
                               auto fmod = attributes.count("fmod") > 0 ? attributes.at("fmod").i() : 0;
                               if (fmod != 1) return true;
                               //Only FP32 data type is allowed
                               for (const auto& input : node->InputDefs()) {
                                 if (input->Type()->find("float") == std::string::npos)
                                   return true;
                               }
                               return false;
                             }};
    op_list_.insert({"Mod", obj});
  }
  {
    UnsupportedOpMode obj = {{V_2021_4},
                             [this](const Node* node, const InitializedTensorSet&) {
                               if (device_id_.find("GPU") != std::string::npos) {
                                auto output_data_type = node->OutputDefs()[0]->TypeAsProto()->tensor_type().elem_type();
                                //If the output of Neg op is INT8, it is rejected for GPU.
                                if (output_data_type == ONNX_NAMESPACE::TensorProto_DataType::TensorProto_DataType_INT8)
                                  return true;
                               }
                               return false;
                             }};
    op_list_.insert({"Neg", obj});
  }
  {
    UnsupportedOpMode obj = {{V_2021_2, V_2021_3, V_2021_4},
                             [this](const Node* node, const InitializedTensorSet&) {
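                               //the op is rejected when its output is also a graph output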
                               auto graph_outputs = graph_viewer_.GetOutputs();
                               const auto& output = node->OutputDefs()[0];
                               auto output_it = find(graph_outputs.begin(), graph_outputs.end(), output);
                               if (output_it != graph_outputs.end())
                                 return true;
                               return false;
                             }};
    op_list_.insert({"NonMaxSuppression", obj});
  }
  {
    UnsupportedOpMode obj = {{V_2020_4, V_2021_1},
                             [this](const Node* node, const InitializedTensorSet&) {
                               //Only supported if the data type of both inputs is the same
                               auto x_data_type = node->InputDefs()[0]->TypeAsProto()->tensor_type().elem_type();
                               auto y_data_type = node->InputDefs()[1]->TypeAsProto()->tensor_type().elem_type();
                               return x_data_type != y_data_type;
                             }};
    op_list_.insert({"Pow", obj});
  }
  {
    UnsupportedOpMode obj = {{V_2021_2, V_2021_3, V_2021_4},
                             [this](const Node* node, const InitializedTensorSet&) {
                               if (device_id_.find("GPU") != std::string::npos) {
                                 auto x_data_type = node->InputDefs()[0]->TypeAsProto()->tensor_type().elem_type();
                                 auto y_data_type = node->InputDefs()[1]->TypeAsProto()->tensor_type().elem_type();
                                 return x_data_type != y_data_type;
                               }
                               //currently not supported when both inputs are int32 or both are int64
                               const bool A_is_int32 = node->InputDefs()[0]->Type()->find("int32") != std::string::npos;
                               const bool B_is_int32 = node->InputDefs()[1]->Type()->find("int32") != std::string::npos;
                               const bool A_is_int64 = node->InputDefs()[0]->Type()->find("int64") != std::string::npos;
                               const bool B_is_int64 = node->InputDefs()[1]->Type()->find("int64") != std::string::npos;
                               if ((A_is_int32 && B_is_int32) || (A_is_int64 && B_is_int64))
                                 return true;
                               return false;
                             }};
    op_list_.insert({"Pow", obj});
  }
  {
    UnsupportedOpMode obj = {{V_2020_4, V_2021_1, V_2021_2, V_2021_3, V_2021_4},
                             [this](const Node* node, const InitializedTensorSet& initializers) {
                               auto slope = node->InputDefs()[1];
                               //PRelu slope has to be an initializer or needs to come from a constant node
                               if (initializers.count(slope->Name()))
                                 return false;
                               else {
                                 for (auto input_node = node->InputNodesBegin(); input_node != node->InputNodesEnd(); ++input_node) {
                                   if (GetInputCount(this->graph_viewer_.GetNode((*input_node).Index()), initializers) == 0)
                                     return false;
                                 }
                               }
                               return true;
                             }};
    op_list_.insert({"PRelu", obj});
  }
  {
    UnsupportedOpMode obj = {{V_2020_4, V_2021_1, V_2021_2, V_2021_3, V_2021_4},
                             [this](const Node* node, const InitializedTensorSet& initializers) {
                               bool non_const_zero_point = false;
                               // check if any of the zero points is NOT in the initializers list
                               non_const_zero_point |= initializers.find(node->InputDefs()[2]->Name()) == initializers.end();
                               non_const_zero_point |= initializers.find(node->InputDefs()[5]->Name()) == initializers.end();
                               non_const_zero_point |= initializers.find(node->InputDefs()[7]->Name()) == initializers.end();
                               // QLinearMatMul is not supported if any of the zero points is a dynamic input
                               return non_const_zero_point;
                             }};
    op_list_.insert({"QLinearMatMul", obj});
  }
  {
    UnsupportedOpMode obj = {{V_2020_4, V_2021_1},
                             [this](const Node* node, const InitializedTensorSet&) {
                               //Only FP32, INT32 and UINT8 data types are supported
                               const bool data_is_float = node->InputDefs()[0]->Type()->find("float") != std::string::npos;
                               const bool data_is_int32 = node->InputDefs()[0]->Type()->find("int32") != std::string::npos;
                               const bool data_is_u8 = node->InputDefs()[0]->Type()->find("uint8") != std::string::npos;
                               return !(data_is_float || data_is_int32 || data_is_u8);
                             }};
    op_list_.insert({"ReduceMin", obj});
  }
  {
    UnsupportedOpMode obj = {{V_2020_4, V_2021_1},
                             [this](const Node* node, const InitializedTensorSet&) {
                               //Resize opset 11 is not supported
                               if (node->InputDefs().size() > 2)
                                 return true;
                               return false;
                             }};
    op_list_.insert({"Resize", obj});
  }
  {
    UnsupportedOpMode obj = {{V_2021_4},
                             [this](const Node* node, const InitializedTensorSet&) {
                               if (device_id_.find("MYRIAD") != std::string::npos) {
                                 const auto& input_arg = node->InputDefs()[1];
                                 auto shape = input_arg->Shape();
                                  //Reshape op with an empty (zero-valued) dim is rejected for MYRIAD
                                 if (shape != nullptr) {
                                   for (const auto& dim : input_arg->Shape()->dim()) {
                                     if (utils::HasDimValue(dim) && dim.dim_value() == 0)
                                       return true;
                                   }
                                 }
                               }
                               return false;
                             }};
    op_list_.insert({"Reshape", obj});
  }
  {
    UnsupportedOpMode obj = {{V_2021_4},
                             [this](const Node* node, const InitializedTensorSet&) {
                                if (device_id_.find("GPU") != std::string::npos) {
                                 auto output_data_type = node->OutputDefs()[0]->TypeAsProto()->tensor_type().elem_type();
                                 //If the output of Transpose op is INT8 or UINT8, it is rejected for GPU.
                                 if (output_data_type == ONNX_NAMESPACE::TensorProto_DataType::TensorProto_DataType_INT8 || 
                                   output_data_type == ONNX_NAMESPACE::TensorProto_DataType::TensorProto_DataType_UINT8)
                                   return true;
                               }
                               return false;
                             }};
    op_list_.insert({"ReduceMax", obj});
  }
  {
    UnsupportedOpMode obj = {{V_2021_2, V_2021_3, V_2021_4},
                             [this](const Node* node, const InitializedTensorSet&) {
                               const auto& attributes = node->GetAttributes();
                               auto axis_attr = attributes.find("axis");
                               //Negative axis is not supported
                               if (axis_attr->second().i() < 0)
                                 return true;
                               if (device_id_.find("MYRIAD") != std::string::npos) {
                                 const auto& input_arg = node->InputDefs()[2];
                                 auto updates_shape = input_arg->Shape();
                                 const auto& output_arg = node->OutputDefs()[0];
                                 auto out_shape = output_arg->Shape();
                                 //If updates attribute dim value greater than output_shape dim value, we reject
                                 if(node->InputDefs()[2]->Name() == "updates")
                                 {
                                  size_t updates_size = updates_shape->dim_size();
                                  if(updates_size == 2) {
                                    if(updates_shape->dim(1).dim_value() > out_shape->dim(1).dim_value())
                                      return true;
                                  }
                                  }
                               }
                               return false;
                             }};
    op_list_.insert({"Scatter", obj});
    op_list_.insert({"ScatterElements", obj});
  }
  {
    UnsupportedOpMode obj = {{V_2020_4, V_2021_1, V_2021_2, V_2021_3, V_2021_4},
                             [this](const Node* node, const InitializedTensorSet& initializers) {
                               //start, end, and axes need to be initializers
                               bool cond_for_slice = false;
                               const auto& data_arg = node->InputDefs()[0];
                               auto graph_inputs = graph_viewer_.GetInputs();

                               auto it = find(graph_inputs.begin(), graph_inputs.end(), data_arg);
                               if (it != graph_inputs.end()) {
                                 if (node->InputDefs().size() > 1) {
                                   const auto& start_arg = node->InputDefs()[1];
                                   const auto& end_arg = node->InputDefs()[2];
                                   cond_for_slice |= initializers.find(start_arg->Name()) == initializers.end();
                                   cond_for_slice |= initializers.find(end_arg->Name()) == initializers.end();
                                 }
                                 if (node->InputDefs().size() > 3) {
                                   const auto& axes_arg = node->InputDefs()[3];
                                   cond_for_slice |= initializers.find(axes_arg->Name()) == initializers.end();
                                 }
                               }

                               return cond_for_slice;
                             }};
    op_list_.insert({"Slice", obj});
  }
  {
    UnsupportedOpMode obj = {{V_2020_4, V_2021_1, V_2021_2, V_2021_3, V_2021_4},
                             [this](const Node* node, const InitializedTensorSet&) {
                               //Squeeze without an axes attribute is not supported
                               const auto& attributes = node->GetAttributes();
                               if (attributes.count("axes") == 0)
                                 return true;
                               return false;
                             }};
    op_list_.insert({"Squeeze", obj});
  }
  {
    UnsupportedOpMode obj = {{V_2020_4},
                             [this](const Node* node, const InitializedTensorSet&) {
                               return node->InputDefs().size() > 1;
                             }};
    op_list_.insert({"TopK", obj});
  }
  {
    UnsupportedOpMode obj = {{V_2021_4},
                             [this](const Node* node, const InitializedTensorSet&) {
                               if (device_id_.find("GPU") != std::string::npos) {
                                auto output_data_type = node->OutputDefs()[0]->TypeAsProto()->tensor_type().elem_type();
                                //If the output of Transpose op is INT8, it is rejected for GPU.
                                if (output_data_type == ONNX_NAMESPACE::TensorProto_DataType::TensorProto_DataType_INT8)
                                  return true;
                               }
                               return false;
                             }};
    op_list_.insert({"Transpose", obj});
  }
  {
    UnsupportedOpMode obj = {{V_2020_4, V_2021_2, V_2021_3, V_2021_4},
                             [this](const Node* node, const InitializedTensorSet&) {
                               return (!this->dimension_unsupported(node));
                             }};
    op_list_.insert({"Unsqueeze", obj});
  }
  {
    UnsupportedOpMode obj = {{V_2021_1, V_2021_2, V_2021_3, V_2021_4},
                             [this](const Node* node, const InitializedTensorSet&) {
                               //check for attributes
                               auto& upsample_attr = node->GetAttributes();
                               if (upsample_attr.count("scales") > 0) {
                                 auto& upsample_arg = upsample_attr.at("scales");
                                 auto float_size = upsample_arg.floats_size();
                                 if (float_size > 2 && (upsample_arg.floats(0) != 1.f || upsample_arg.floats(1) != 1.f)) {
                                   return true;
                                 }
                               }

                               //check for input dimensions
                               const auto& x_arg = node->InputDefs()[0];
                               auto shape = x_arg->Shape();
                               if (shape != nullptr) {
                                 //input tensor rank cannot be of one dimension
                                 if (shape->dim_size() == 1) {
                                   return true;
                                 }
                               }
                               // x_arg supports only float, int8, and float16 types
                               if ((x_arg->TypeAsProto()->tensor_type().elem_type() == ONNX_NAMESPACE::TensorProto_DataType::TensorProto_DataType_FLOAT) ||
                                   (x_arg->TypeAsProto()->tensor_type().elem_type() == ONNX_NAMESPACE::TensorProto_DataType::TensorProto_DataType_INT8) ||
                                   (x_arg->TypeAsProto()->tensor_type().elem_type() == ONNX_NAMESPACE::TensorProto_DataType::TensorProto_DataType_FLOAT16)) {
                                 return false;
                               } else {
                                 return true;
                               }
                             }};
    op_list_.insert({"Upsample", obj});
  }
  {
    UnsupportedOpMode obj = {{V_2021_2},
                             [this](const Node* node, const InitializedTensorSet&) {
                               //the float data type is not supported for Where's 'X' input
                               const bool data_is_float = node->InputDefs()[1]->Type()->find("float") != std::string::npos;
                               return data_is_float;
                             }};
    op_list_.insert({"Where", obj});
  }
  {
    UnsupportedOpMode obj = {{V_2021_3, V_2021_4},
                             [this](const Node* node, const InitializedTensorSet&) {
                               return (!this->dimension_unsupported(node));
                             }};
    op_list_.insert({"ReduceSum", obj});
  }
}
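
For context, a minimal sketch of how these tables are presumably consulted per node (the real check lives elsewhere in data_ops.cc; version_id_ and the exact control flow are assumptions based on this file, not verbatim code):

    // Returns true when some registered mode rejects the node for the active EP version.
    bool DataOps::unsupported_op_mode(const Node* node) {
      const auto& initializers = graph_viewer_.GetAllInitializedTensors();
      auto range = op_list_.equal_range(node->OpType());   // op_list_ as a multimap
      for (auto it = range.first; it != range.second; ++it) {
        const auto& mode = it->second;
        // Only run predicates registered for the OpenVINO version in use.
        if (mode.ver.find(version_id_) != mode.ver.end() && mode.func(node, initializers))
          return true;
      }
      return false;
    }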