in tensorflow_fold/llgtm/backend/tf_evaluator.cc [291:414]
void TfGraphEvaluator::Init() {
  // Code adapted from learning/brain/contrib/eager
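  // Init() builds the local device list and pre-instantiates one TF kernel
  // per supported opcode; kernel attributes are fixed at construction time.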
  Status status;
  tensorflow::SessionOptions options;
  std::vector<tensorflow::Device*> devices;
  status = tensorflow::DeviceFactory::AddDevices(
      options, /*name_prefix=*/ "/job:llgtm/replica:0/task:0", &devices);
  CHECK(status.ok());
  device_mgr_ = absl::make_unique<tensorflow::DeviceMgr>(devices);
  devices_ = device_mgr_->ListDevices();
  // TODO(delesley): Add support for multiple devices, not just the CPU.
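  // DeviceFactory::AddDevices registers CPU devices first, so devices_[0]
  // below is a CPU device.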
  tensorflow::Device* cpu_device = devices_[0];
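
  // One TF kernel is pre-built per opcode; slots for opcodes without a
  // kernel are left null by resize().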
  kernels_.resize(kMaximumTensorOpcode);

  kernels_[kOpAdd] = absl::make_unique<TfKernelAdapter>();
  kernels_[kOpAdd]->Create("Add")
      .TypeAttr<float>("T")
      .NumInputs(2)
      .Init(cpu_device);
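
  // "Fill" takes the output shape (int32) and a scalar value, and broadcasts
  // the scalar to a tensor of that shape.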
  kernels_[kOpConstantFromScalar] = absl::make_unique<TfKernelAdapter>();
  kernels_[kOpConstantFromScalar]->Create("Fill")
      .TypeAttr<float>("T")
      .NumInputs(2)
      .Init(cpu_device);

  kernels_[kOpMultiply] = absl::make_unique<TfKernelAdapter>();
  kernels_[kOpMultiply]->Create("Mul")
      .TypeAttr<float>("T")
      .NumInputs(2)
      .Init(cpu_device);

  kernels_[kOpReciprocal] = absl::make_unique<TfKernelAdapter>();
  kernels_[kOpReciprocal]->Create("Reciprocal")
      .TypeAttr<float>("T")
      .NumInputs(1)
      .Init(cpu_device);
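
  // Kernel attributes are fixed when the kernel is built, so transpose_a and
  // transpose_b are hardwired to false here.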
  // TODO(matthiasspringer): Utilize transpose_a/b attributes.
  kernels_[kOpMatmul] = absl::make_unique<TfKernelAdapter>();
  kernels_[kOpMatmul]->Create("MatMul")
      .Attr("transpose_a", false)
      .Attr("transpose_b", false)
      .TypeAttr<float>("T")
      .NumInputs(2)
      .Init(cpu_device);

  kernels_[kOpNegative] = absl::make_unique<TfKernelAdapter>();
  kernels_[kOpNegative]->Create("Neg")
      .TypeAttr<float>("T")
      .NumInputs(1)
      .Init(cpu_device);

  kernels_[kOpSigmoid] = absl::make_unique<TfKernelAdapter>();
  kernels_[kOpSigmoid]->Create("Sigmoid")
      .TypeAttr<float>("T")
      .NumInputs(1)
      .Init(cpu_device);

  kernels_[kOpTanh] = absl::make_unique<TfKernelAdapter>();
  kernels_[kOpTanh]->Create("Tanh")
      .TypeAttr<float>("T")
      .NumInputs(1)
      .Init(cpu_device);
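
  // "Transpose" takes the input tensor and an int32 permutation vector
  // ("Tperm").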
  kernels_[kOpTranspose] = absl::make_unique<TfKernelAdapter>();
  kernels_[kOpTranspose]->Create("Transpose")
      .TypeAttr<float>("T")
      .TypeAttr<int32_t>("Tperm")
      .NumInputs(2)
      .Init(cpu_device);
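
  // "Sum" reduces over the int32 axis indices passed as the second input;
  // keep_dims retains the reduced dimensions with size 1.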
  kernels_[kOpReduceSum] = absl::make_unique<TfKernelAdapter>();
  kernels_[kOpReduceSum]->Create("Sum")
      .Attr("keep_dims", true)
      .TypeAttr<float>("T")
      .TypeAttr<int32_t>("Tidx")
      .NumInputs(2)
      .Init(cpu_device);
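
  // "Reshape" takes the input tensor and an int32 target shape ("Tshape").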
  kernels_[kOpReshape] = absl::make_unique<TfKernelAdapter>();
  kernels_[kOpReshape]->Create("Reshape")
      .TypeAttr<float>("T")
      .TypeAttr<int32_t>("Tshape")
      .NumInputs(2)
      .Init(cpu_device);

  kernels_[kOpRelu] = absl::make_unique<TfKernelAdapter>();
  kernels_[kOpRelu]->Create("Relu")
      .TypeAttr<float>("T")
      .NumInputs(1)
      .Init(cpu_device);
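
  // "ReluGrad" takes (gradients, features) and passes gradients through only
  // where the corresponding features are positive.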
  kernels_[kOpReluGrad] = absl::make_unique<TfKernelAdapter>();
  kernels_[kOpReluGrad]->Create("ReluGrad")
      .TypeAttr<float>("T")
      .NumInputs(2)
      .Init(cpu_device);
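
  // For the random ops, "T" is the dtype of the int32 shape input and "dtype"
  // is the float output type. With seed and seed2 both 0, TF chooses a
  // nondeterministic seed.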
  kernels_[kOpNormalRandom] = absl::make_unique<TfKernelAdapter>();
  kernels_[kOpNormalRandom]->Create("RandomStandardNormal")
      .Attr("seed", 0)
      .Attr("seed2", 0)
      .TypeAttr<int32_t>("T")
      .TypeAttr<float>("dtype")
      .NumInputs(1)
      .Init(cpu_device);

  kernels_[kOpUniformRandom] = absl::make_unique<TfKernelAdapter>();
  kernels_[kOpUniformRandom]->Create("RandomUniform")
      .Attr("seed", 0)
      .Attr("seed2", 0)
      .TypeAttr<int32_t>("T")
      .TypeAttr<float>("dtype")
      .NumInputs(1)
      .Init(cpu_device);

  // TODO(delesley): Handle broadcast.
  // TODO(matthiasspringer): Handle softmax and softmax cross-entropy
  // (+gradient).
  // TODO(matthiasspringer): Handle concat and split.
}