in sdk/python/foundation-models/system/inference/text-generation/llama-files/score/default/score.py [0:0]
def create_tensor_spec_sample_io(model_signature_io):
    """Build a sample input for a tensor-spec model signature.

    Inspects the tensor specs in ``model_signature_io.inputs`` and creates
    zero-filled numpy arrays matching each spec's shape and dtype so that
    inference-schema can generate a correct swagger sample for the input.

    A leading dimension of -1 in a spec's shape denotes a variable batch
    size; it is replaced with 1 in the sample so the sample is concrete.

    :param model_signature_io: model signature inputs holder (e.g. mlflow
        ``ModelSignature`` inputs) exposing ``inputs`` (list of tensor specs
        with ``shape``/``type``, and ``name`` when named) and
        ``has_input_names()``.
    :return: a single ``numpy.ndarray`` when the input is a single unnamed
        tensor, otherwise ``Dict[str, numpy.ndarray]`` mapping each input
        name to its sample array.
    """

    def _concrete_shape(shape):
        # Replace a variable (-1) batch dimension with 1; copy so the
        # spec's original shape object is never mutated.
        if shape and shape[0] == -1:
            shape = list(shape)
            shape[0] = 1
        return tuple(shape)

    io = model_signature_io.inputs
    if not model_signature_io.has_input_names():
        # Unnamed tensor input: the sample is just a single numpy.ndarray.
        return np.zeros(_concrete_shape(io[0].shape), dtype=io[0].type)

    # Named tensor inputs: map each input name to a sample numpy.ndarray
    # of the corresponding shape/dtype.
    return {
        io_val.name: np.zeros(_concrete_shape(io_val.shape), dtype=io_val.type)
        for io_val in io
    }