def tensor_to_imgarray()

in utils.py


import numpy as np
import torch

# image_norm_mean / image_norm_stddev are assumed to be the module-level
# per-channel normalization constants defined elsewhere in utils.py.


def tensor_to_imgarray(image, floating_point=False):
    """Convert a mean/std-normalized tensor or array as used by the model into a standard image array

    Parameters
    ----------
    image : Union[numpy.ndarray, torch.Tensor]
        A mean/std-normalized image tensor or array in inference format for the model
    floating_point : bool, optional
        Set True to skip conversion to 0-255 uint8 and return a 0-1.0 float ndarray instead

    Returns
    -------
    numpy.ndarray
        The de-normalized image in channels-last (HWC) layout: uint8 in [0, 255] by
        default, or float in [0.0, 1.0] if `floating_point` is True
    """
    if torch.is_tensor(image):
        # detach() guards against tensors that still require grad
        image = image.detach().cpu().numpy()
        if image.ndim > 3:
            # Leading batch dimension - take the first element only
            image = image[0]

    image_shape = image.shape
    # The channel dimension is taken to be the first axis of size 3
    channeldim = image_shape.index(3)
    result = image

    # Move channel to correct (trailing) dim if not already:
    if channeldim < (len(image_shape) - 1):
        result = np.moveaxis(result, channeldim, -1)
        image_shape = result.shape
        channeldim = len(image_shape) - 1

    # Pad mean and stddev constants to match the image dimensions
    # TODO: Simplify this when we're consistent in what the image dimensions are!
    # We use loops here because np.expand_dims only accepts a tuple of axes from
    # numpy>=1.18, and some environments may run older versions:
    stddev = image_norm_stddev
    mean = image_norm_mean
    for _ in range(channeldim):
        stddev = np.expand_dims(stddev, 0)
        mean = np.expand_dims(mean, 0)
    for _ in range(channeldim + 1, len(image_shape)):
        stddev = np.expand_dims(stddev, -1)
        mean = np.expand_dims(mean, -1)

    # Invert the preprocessing normalization: x = x_norm * stddev + mean
    result = (result * stddev) + mean
    if floating_point:
        return np.clip(result, 0., 1.)
    else:
        return np.clip(result * 255.0, 0, 255).astype(np.uint8)
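
Example usage, as a minimal sketch: the random tensor below is stand-in data for a real normalized model input (not actual model output), and the printed shapes assume the usual NCHW batch layout. The exact pixel values depend on the module's image_norm_mean / image_norm_stddev constants.

# Hypothetical usage; `batch` is random stand-in data
import torch

batch = torch.randn(1, 3, 4, 4)  # one "normalized" RGB image in NCHW layout

img_uint8 = tensor_to_imgarray(batch)
img_float = tensor_to_imgarray(batch, floating_point=True)

print(img_uint8.shape, img_uint8.dtype)  # (4, 4, 3) uint8
print(img_float.shape)                   # (4, 4, 3), floats clipped to [0.0, 1.0]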