def run_single_shot()

in yourbench/pipeline/question_generation.py [0:0]


def run_single_shot(config: dict[str, Any]) -> None:
    """
    Run the single-hop question generation stage of the pipeline.

    Reads the stage configuration, picks the system prompt matching the
    configured question mode, builds and executes inference calls over the
    chunked dataset, then parses and persists the resulting questions.
    Exits early when the stage is disabled or no inference calls exist.
    """
    stage_settings = config.get("pipeline", {}).get(SINGLE_SHOT_KEY, {})
    if not stage_settings.get("run", False):
        logger.info("single_shot_question_generation stage is disabled.")
        return

    # Validate the requested mode; anything unrecognized falls back to open-ended.
    mode = stage_settings.get("question_mode", "open-ended")
    if mode not in {"open-ended", "multi-choice"}:
        logger.warning(f"Invalid question_mode '{mode}', defaulting to 'open-ended'")
        mode = "open-ended"

    logger.info(f"Single-shot question_mode: {mode}")

    # Select the system prompt variant for the chosen mode.
    if mode == "multi-choice":
        prompt = QUESTION_GENERATION_SYSTEM_PROMPT_MULTI
        logger.debug("Using MULTI-CHOICE prompt for single-shot generation.")
    else:
        prompt = QUESTION_GENERATION_SYSTEM_PROMPT
        logger.debug("Using OPEN-ENDED prompt for single-shot generation.")

    system_message = {"role": "system", "content": prompt}

    chunked = custom_load_dataset(config=config, subset="chunked")
    logger.info(f"Loaded {len(chunked)} chunks for single-shot.")

    calls, index_map = build_single_shot_inference_calls(
        chunked, system_message, stage_settings, get_sampling_cfg(stage_settings)
    )
    if not calls:
        logger.warning("No valid inference calls for single-shot.")
        return

    responses = run_inference(config=config, step_name=SINGLE_SHOT_KEY, inference_calls=calls)
    rows = parse_single_shot_responses(responses, index_map, stage_settings)

    # Nothing parsed means nothing to save.
    if not rows:
        return
    logger.info(f"Saving {len(rows)} single-shot questions.")
    custom_save_dataset(Dataset.from_list(rows), config=config, subset="single_shot_questions")