in torch_xla/csrc/tensor_util.cpp [814:847]
torch::lazy::hash_t TensorHash(const at::Tensor& tensor) {
  // Hash the raw storage bytes; the contiguous() call guarantees a dense,
  // row-major buffer even when the input is a strided view.
  at::Tensor ctensor = tensor.contiguous();
  int64_t size = ctensor.numel() * ctensor.element_size();
  // Dispatch on dtype so data_ptr<T>() is instantiated with the matching
  // element type; DataHash itself only sees the raw bytes.
  switch (ctensor.scalar_type()) {
    case at::ScalarType::Bool:
      return torch::lazy::DataHash(ctensor.data_ptr<bool>(), size);
    case at::ScalarType::Byte:
      return torch::lazy::DataHash(ctensor.data_ptr<uint8_t>(), size);
    case at::ScalarType::Char:
      return torch::lazy::DataHash(ctensor.data_ptr<int8_t>(), size);
    case at::ScalarType::Short:
      return torch::lazy::DataHash(ctensor.data_ptr<int16_t>(), size);
    case at::ScalarType::Int:
      return torch::lazy::DataHash(ctensor.data_ptr<int32_t>(), size);
    case at::ScalarType::Long:
      return torch::lazy::DataHash(ctensor.data_ptr<int64_t>(), size);
    case at::ScalarType::Float:
      return torch::lazy::DataHash(ctensor.data_ptr<float>(), size);
    case at::ScalarType::Double:
      return torch::lazy::DataHash(ctensor.data_ptr<double>(), size);
    case at::ScalarType::BFloat16:
      return torch::lazy::DataHash(ctensor.data_ptr<at::BFloat16>(), size);
    case at::ScalarType::Half:
      return torch::lazy::DataHash(ctensor.data_ptr<at::Half>(), size);
    case at::ScalarType::ComplexFloat:
      return torch::lazy::DataHash(ctensor.data_ptr<c10::complex<float>>(),
                                   size);
    case at::ScalarType::ComplexDouble:
      return torch::lazy::DataHash(ctensor.data_ptr<c10::complex<double>>(),
                                   size);
    default:
      // XLA_ERROR() throws, so no fall-through return is needed here.
      XLA_ERROR() << "Unsupported scalar type: " << ctensor.scalar_type();
  }
}
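
A minimal caller sketch (hypothetical, not part of tensor_util.cpp; it assumes a translation unit inside the torch_xla build where the TensorHash declaration is on the include path, and that the function lives in the torch_xla namespace). It illustrates what the function actually compares: only the raw bytes of the dense buffer, not shape, strides, or dtype, so a non-contiguous view hashes the same as its materialized contiguous copy.

#include <ATen/ATen.h>

#include "torch_xla/csrc/tensor_util.h"  // assumed header for TensorHash

// Returns true when both tensors carry byte-identical dense buffers.
// torch::lazy::hash_t is c10::uint128, which supports operator==.
bool SameDataHash(const at::Tensor& a, const at::Tensor& b) {
  return torch_xla::TensorHash(a) == torch_xla::TensorHash(b);
}

// Example: a transposed view hashes equal to its contiguous copy, because
// TensorHash materializes via contiguous() before hashing.
//   at::Tensor t = at::randn({2, 3});
//   bool same = SameDataHash(t.t(), t.t().contiguous());  // true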