# apply_prompt() — extracted from preprocess/utils.py

def apply_prompt(task, example, do_train, prompt_names_per_task, prompt_dict):
    """Render *example* through the prompt template(s) registered for *task*.

    Training mode (``do_train`` truthy): applies every prompt listed in
    ``prompt_names_per_task[task]`` and returns a dict mapping
    ``"inst:<task>:<prompt-name>"`` to an instance record with empty options.
    Prompts that yield ``[""]`` (no usable rendering) are skipped.

    Eval mode: applies the single template ``prompt_dict[task]``, patches the
    answer options for a handful of tasks whose templates do not expose them
    properly, and returns the instance record serialized as a JSON string.
    """
    if do_train:
        instances = {}
        for prompt_name in prompt_names_per_task[task]:
            rendered = prompt_dict[task + ":" + prompt_name].apply(example)
            if rendered == [""]:
                # Template produced nothing for this example — drop it.
                continue
            inp, out = rendered
            key = "inst:" + task + ":" + prompt_name
            instances[key] = {"task": key, "input": inp, "output": out, "options": []}
        return instances

    template = prompt_dict[task]
    inp, out = template.apply(example)
    answer_options = template.get_answer_choices_list(example)

    # Special cases where the template does not handle answer options properly.
    if task == "commonsense_qa":
        assert answer_options is None
        answer_options = example["choices"]["text"]
        assert out in answer_options
    elif task == "codah":
        assert answer_options is None
        out = out.strip()
        answer_options = [candidate.strip() for candidate in example["candidate_answers"]]
        assert out in answer_options, (out, answer_options)
    elif task == "yelp_polarity":
        assert answer_options == ["no", "yes"] and out in ["yes.", "no."], (out, answer_options)
        # Drop the trailing period so the output matches one of the options.
        out = out[:-1]
        assert out in answer_options, (out, answer_options)
    elif task == "sick":
        assert answer_options is None
        answer_options = ["entailment", "neutral", "contradiction"]
        assert out in answer_options, (out, answer_options)

    if answer_options is None or len(answer_options) == 0:
        # NOTE(review): do_train is always False on this path (the train branch
        # returned above), so this assert fires whenever eval options are
        # missing — it acts as a "must have options at eval time" guard.
        assert do_train
        answer_options = []
    else:
        assert out in answer_options, (task, out, answer_options)
    return json.dumps({"task": "inst:" + task, "input": inp, "output": out, "options": answer_options})