def run()

in sdk/python/foundation-models/system/inference/text-generation/llama-files/score/default/score.py

The scoring entry point: it first sanitizes the request with get_safe_input() and returns an empty response if the content-safety severity exceeds aacs_threshold; otherwise it logs inputs and outputs through the data collectors, calls model.predict(), and returns the result filtered through get_safe_response().


def run(input_data):
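    # NOTE: model, inputs_collector, outputs_collector, aacs_threshold, _logger and the
    # get_safe_* / parse_model_input_* helpers are module-level globals, presumably
    # populated by the scoring script's init() (not shown in this excerpt).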
    context = None
    input_data, severity = get_safe_input(input_data)
    if severity > aacs_threshold:
        return {}
    if (
        isinstance(input_data, np.ndarray)
        or (
            isinstance(input_data, dict)
            and input_data
            and isinstance(list(input_data.values())[0], np.ndarray)
        )
        or (pandas_installed and isinstance(input_data, pd.DataFrame))
    ):
        # Collect model input
        try:
            context = inputs_collector.collect(input_data)
        except Exception as e:
            _logger.error(
                "Error collecting model_inputs collection request. {}".format(e)
            )

        result = model.predict(input_data)

        # Collect model output
        try:
            mdc_output_df = pd.DataFrame(result)
            outputs_collector.collect(mdc_output_df, context)
        except Exception as e:
            _logger.error(
                "Error collecting model_outputs collection request. {}".format(e)
            )

        return get_safe_response(result)

    # Parse the raw payload into the model's expected input format before collection,
    # so that `input` is defined when it is passed to the collector below.
    if is_transformers or is_langchain or is_openai:
        input = parse_model_input_from_input_data_transformers(input_data)
    else:
        input = parse_model_input_from_input_data_traditional(input_data)

    # Collect model input
    try:
        context = inputs_collector.collect(input)
    except Exception as e:
        _logger.error("Error collecting model_inputs collection request. {}".format(e))

    result = model.predict(input)

    # Collect output data
    try:
        mdc_output_df = pd.DataFrame(result)
        outputs_collector.collect(mdc_output_df, context)
    except Exception as e:
        _logger.error("Error collecting model_outputs collection request. {}".format(e))

    return get_safe_response(result)
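
For illustration only, a minimal sketch of exercising run() locally, assuming init() has already populated the module-level globals. The DataFrame payload and its "input_string" column are assumptions made for this example, not something defined by score.py itself.

import pandas as pd

# Hypothetical smoke test: a pandas DataFrame takes the first branch of run(),
# so the payload goes straight to model.predict() without the parse_model_input_* helpers.
sample = pd.DataFrame({"input_string": ["What is the capital of France?"]})
response = run(sample)
print(response)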