# run_segmentation_inference()
#
# in src/edge/run.py [0:0]


def run_segmentation_inference(agent, filename):
    """Run segmentation inference on the given image file.

    Args:
        agent: Inference agent exposing ``predict(model_id, batch)`` and
            ``capture_data(model_id, batch, prediction)``.
        filename: Path to the image file to segment.

    Returns:
        Tuple ``(y_mask, t_ms)`` where ``y_mask`` is the boolean
        segmentation mask (prediction thresholded at 0.5) and ``t_ms``
        the model latency in whole milliseconds, or ``(None, None)``
        when the segmentation model has not been deployed yet.
    """
    # Look up the currently loaded segmentation model. A single next()
    # with a None default replaces the previous redundant double check
    # (any() over models_loaded followed by the same next() lookup).
    model_name_img_seg = config['mappings']['image-segmentation-app']
    model_dict_img_seg = next(
        (m for m in models_loaded if m['name'] == model_name_img_seg), None)
    if model_dict_img_seg is None:
        logging.info('Model for image segmentation not loaded, waiting for deployment...')
        return None, None
    model_id_img_seg = model_dict_img_seg['identifier']

    # Lazy %-style args so formatting only happens if the record is emitted.
    logging.info('\nSegmentation inference with file %s and model %s',
                 filename, model_id_img_seg)

    # Context manager releases the underlying file handle; convert()
    # returns a new in-memory image, so it remains usable afterwards.
    with PIL.Image.open(filename) as img:
        image = img.convert(mode='RGB')

    # Preprocessing
    x_batchified = preprocess_image(image, IMG_WIDTH, IMG_HEIGHT)

    # Fit into 0-1 range, as the unet model expects this
    x_batchified = x_batchified / 255.0

    # Run inference and measure model latency.
    t_start = timer()
    y = agent.predict(model_id_img_seg, x_batchified)
    t_stop = timer()
    t_ms = np.round((t_stop - t_start) * 1000, decimals=0)

    # Threshold the first batch element's probabilities into a boolean mask.
    y_mask = y[0] > 0.5
    agent.capture_data(model_id_img_seg, x_batchified, y.astype(np.float32))

    return y_mask, t_ms