def share_model_across_processes()

in pipelines/ml_ai_python/ml_ai_pipeline/model_handlers.py [0:0]


  def share_model_across_processes(self) -> bool:
    """Whether a single model copy should be shared by all worker processes.

    Returns:
      Always ``True``: Gemma's weights are large enough that loading a
      separate copy per worker process would exhaust VM memory, so the
      model is loaded once per VM and shared.
    """
    # Unconditional — large-model memory pressure makes per-process
    # loading unsafe regardless of configuration.
    return True