def main()

in lm_eval/tasks/bigbench/generate_tasks.py
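
This script generates the per-subtask YAML configs for the BIG-bench tasks: for each subtask name, main() writes one config into multiple_choice/ and one into generate_until/. For the multiple-choice variant it loads the first example of the subtask's zero-shot split from hails/bigbench to check that the subtask actually has multiple-choice targets, and to choose between two multiple-choice templates.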


import os

import datasets
import yaml

# `all_subtasks` (every BIG-bench subtask name) and `skip_tasks` (subtasks
# excluded from the multiple-choice variant) are module-level lists defined
# elsewhere in this file.


def main() -> None:
    # Emit one YAML config per subtask for each of the two task variants:
    # multiple_choice/ and generate_until/.
    for path, task_type in zip(
        ["multiple_choice", "generate_until"],
        ["multiple_choice_template_yaml", "generate_until_template_yaml"],
    ):
        os.makedirs(path, exist_ok=True)
        for task in all_subtasks:
            file_name = f"{task}.yaml"
            try:
                template_file = task_type
                if path == "multiple_choice":
                    print(f"Checking {task} for multiple choices")
                    if task in skip_tasks:
                        continue
                    # Inspect the first zero-shot example to see whether this
                    # subtask has multiple-choice targets at all.
                    data = datasets.load_dataset("hails/bigbench", task + "_zero_shot")
                    multiple_choice_targets = data["default"][0][
                        "multiple_choice_targets"
                    ]
                    if len(multiple_choice_targets) == 0:
                        continue
                    # Use template "a" when the gold targets are a proper subset
                    # of the multiple-choice targets, template "b" otherwise.
                    template_file = "multiple_choice_template_b_yaml"
                    if set(data["default"][0]["targets"]) < set(
                        multiple_choice_targets
                    ):
                        template_file = "multiple_choice_template_a_yaml"

                with open(f"{path}/{file_name}", "w", encoding="utf-8") as f:
                    f.write("# Generated by utils.py\n")
                    yaml.dump(
                        {
                            "include": f"../{template_file}",
                            "task": "bigbench_"
                            + task
                            + "_{}".format(task_type.split("_template_yaml")[0]),
                            "dataset_name": task
                            + "_zero_shot",  # zero-shot version of the dataset
                        },
                        f,
                        width=float("inf"),
                        allow_unicode=True,
                    )
            except FileExistsError:
                # Nothing above raises FileExistsError when writing with mode
                # "w"; this guard is kept as a harmless no-op.
                pass
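
For reference, a sketch of the output this loop produces. Taking emoji_movie as an illustrative subtask (an assumption; any name in all_subtasks whose first zero-shot example has non-empty multiple_choice_targets behaves the same), and assuming its gold targets form a proper subset of its multiple-choice targets so that template "a" is selected, the loop writes multiple_choice/emoji_movie.yaml with keys sorted alphabetically by yaml.dump:

    # Generated by generate_tasks.py
    dataset_name: emoji_movie_zero_shot
    include: ../multiple_choice_template_a_yaml
    task: bigbench_emoji_movie_multiple_choice

The generate_until pass writes a matching generate_until/emoji_movie.yaml that includes ../generate_until_template_yaml and names the task bigbench_emoji_movie_generate_until.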