in bindings/python/py_src/safetensors/torch.py [0:0]
# `Dict`, `Any`, `torch`, `_find_shared_tensors`, and `_tobytes` come from the
# top of this module.
def _flatten(tensors: Dict[str, torch.Tensor]) -> Dict[str, Dict[str, Any]]:
    if not isinstance(tensors, dict):
        raise ValueError(f"Expected a dict of [str, torch.Tensor] but received {type(tensors)}")

    # Reject anything that is not a dense (strided) torch.Tensor: sparse layouts
    # cannot be serialized by this library.
    invalid_tensors = []
    for k, v in tensors.items():
        if not isinstance(v, torch.Tensor):
            raise ValueError(f"Key `{k}` is invalid, expected torch.Tensor but received {type(v)}")

        if v.layout != torch.strided:
            invalid_tensors.append(k)
    if invalid_tensors:
        raise ValueError(
            f"You are trying to save sparse tensors: `{invalid_tensors}` which this library does not support."
            " You can make them dense tensors before saving with `.to_dense()` but be aware this might"
            " make a much larger file than needed."
        )

    # Reject tensors that share memory: writing each one separately would
    # duplicate data on disk and break the sharing when loading again.
    shared_pointers = _find_shared_tensors(tensors)
    failing = []
    for names in shared_pointers:
        if len(names) > 1:
            failing.append(names)

    if failing:
        raise RuntimeError(
            f"""
            Some tensors share memory, this will lead to duplicate memory on disk and potential differences when loading them again: {failing}.
            A potential way to correctly save your model is to use `save_model`.
            More information at https://huggingface.co/docs/safetensors/torch_shared_tensors
            """
        )

    # Serialize each tensor as its dtype name, shape, and raw bytes.
    return {
        k: {
            "dtype": str(v.dtype).split(".")[-1],
            "shape": v.shape,
            "data": _tobytes(v, k),
        }
        for k, v in tensors.items()
    }
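

# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the library): what the dict returned by
# `_flatten` looks like for two independent strided tensors. It assumes torch
# and the private helpers above are available in this module; the exact byte
# payload comes from `_tobytes`, so only dtype, shape, and type are checked.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    example = {"weight": torch.zeros((2, 2)), "bias": torch.ones(2)}
    flat = _flatten(example)

    # dtype is stored as a plain string without the "torch." prefix.
    assert flat["weight"]["dtype"] == "float32"
    # shape is the tensor's original torch.Size.
    assert flat["weight"]["shape"] == torch.Size([2, 2])
    # data is the raw byte buffer of the tensor.
    assert isinstance(flat["bias"]["data"], bytes)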