src/openai/types/eval_create_response.py [18:112]:
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    "DataSourceConfig",
    "TestingCriterion",
    "TestingCriterionPython",
    "TestingCriterionScoreModel",
    "TestingCriterionScoreModelInput",
    "TestingCriterionScoreModelInputContent",
    "TestingCriterionScoreModelInputContentOutputText",
]

DataSourceConfig: TypeAlias = Annotated[
    Union[EvalCustomDataSourceConfig, EvalStoredCompletionsDataSourceConfig], PropertyInfo(discriminator="type")
]


class TestingCriterionPython(BaseModel):
    __test__ = False
    name: str
    """The name of the grader."""

    source: str
    """The source code of the python script."""

    type: Literal["python"]
    """The object type, which is always `python`."""

    image_tag: Optional[str] = None
    """The image tag to use for the python script."""

    pass_threshold: Optional[float] = None
    """The threshold for the score."""


class TestingCriterionScoreModelInputContentOutputText(BaseModel):
    __test__ = False
    text: str
    """The text output from the model."""

    type: Literal["output_text"]
    """The type of the output text. Always `output_text`."""


TestingCriterionScoreModelInputContent: TypeAlias = Union[
    str, ResponseInputText, TestingCriterionScoreModelInputContentOutputText
]


class TestingCriterionScoreModelInput(BaseModel):
    __test__ = False
    content: TestingCriterionScoreModelInputContent
    """Text inputs to the model - can contain template strings."""

    role: Literal["user", "assistant", "system", "developer"]
    """The role of the message input.

    One of `user`, `assistant`, `system`, or `developer`.
    """

    type: Optional[Literal["message"]] = None
    """The type of the message input. Always `message`."""


class TestingCriterionScoreModel(BaseModel):
    __test__ = False
    input: List[TestingCriterionScoreModelInput]
    """The input text. This may include template strings."""

    model: str
    """The model to use for the evaluation."""

    name: str
    """The name of the grader."""

    type: Literal["score_model"]
    """The object type, which is always `score_model`."""

    pass_threshold: Optional[float] = None
    """The threshold for the score."""

    range: Optional[List[float]] = None
    """The range of the score. Defaults to `[0, 1]`."""

    sampling_params: Optional[object] = None
    """The sampling parameters for the model."""


TestingCriterion: TypeAlias = Annotated[
    Union[
        EvalLabelModelGrader,
        EvalStringCheckGrader,
        EvalTextSimilarityGrader,
        TestingCriterionPython,
        TestingCriterionScoreModel,
    ],
    PropertyInfo(discriminator="type"),
]
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
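A minimal usage sketch (not taken from the SDK docs): these are pydantic BaseModel subclasses, so each grader variant can be constructed directly with keyword arguments. The grader name, source string, and threshold below are hypothetical placeholders.

    from openai.types.eval_create_response import TestingCriterionPython

    # Construct one member of the TestingCriterion union by hand; the required
    # fields are name, source and the literal type tag, the rest default to None.
    python_grader = TestingCriterionPython(
        name="contains-return",                             # hypothetical grader name
        source="def grade(sample, item):\n    return 1.0",  # hypothetical grader source
        type="python",
        pass_threshold=0.5,
    )
    print(python_grader.type)       # -> "python"
    print(python_grader.image_tag)  # -> None (optional field)

The `PropertyInfo(discriminator="type")` annotations are SDK-internal metadata: when a response is deserialized, the `type` tag is used to pick the matching union variant, so application code normally reads these parsed objects rather than building them by hand.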



src/openai/types/eval_retrieve_response.py [18:112]:
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    "DataSourceConfig",
    "TestingCriterion",
    "TestingCriterionPython",
    "TestingCriterionScoreModel",
    "TestingCriterionScoreModelInput",
    "TestingCriterionScoreModelInputContent",
    "TestingCriterionScoreModelInputContentOutputText",
]

DataSourceConfig: TypeAlias = Annotated[
    Union[EvalCustomDataSourceConfig, EvalStoredCompletionsDataSourceConfig], PropertyInfo(discriminator="type")
]


class TestingCriterionPython(BaseModel):
    __test__ = False
    name: str
    """The name of the grader."""

    source: str
    """The source code of the python script."""

    type: Literal["python"]
    """The object type, which is always `python`."""

    image_tag: Optional[str] = None
    """The image tag to use for the python script."""

    pass_threshold: Optional[float] = None
    """The threshold for the score."""


class TestingCriterionScoreModelInputContentOutputText(BaseModel):
    __test__ = False
    text: str
    """The text output from the model."""

    type: Literal["output_text"]
    """The type of the output text. Always `output_text`."""


TestingCriterionScoreModelInputContent: TypeAlias = Union[
    str, ResponseInputText, TestingCriterionScoreModelInputContentOutputText
]


class TestingCriterionScoreModelInput(BaseModel):
    __test__ = False
    content: TestingCriterionScoreModelInputContent
    """Text inputs to the model - can contain template strings."""

    role: Literal["user", "assistant", "system", "developer"]
    """The role of the message input.

    One of `user`, `assistant`, `system`, or `developer`.
    """

    type: Optional[Literal["message"]] = None
    """The type of the message input. Always `message`."""


class TestingCriterionScoreModel(BaseModel):
    __test__ = False
    input: List[TestingCriterionScoreModelInput]
    """The input text. This may include template strings."""

    model: str
    """The model to use for the evaluation."""

    name: str
    """The name of the grader."""

    type: Literal["score_model"]
    """The object type, which is always `score_model`."""

    pass_threshold: Optional[float] = None
    """The threshold for the score."""

    range: Optional[List[float]] = None
    """The range of the score. Defaults to `[0, 1]`."""

    sampling_params: Optional[object] = None
    """The sampling parameters for the model."""


TestingCriterion: TypeAlias = Annotated[
    Union[
        EvalLabelModelGrader,
        EvalStringCheckGrader,
        EvalTextSimilarityGrader,
        TestingCriterionPython,
        TestingCriterionScoreModel,
    ],
    PropertyInfo(discriminator="type"),
]
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
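The retrieve-response module mirrors the create-response module, so its nested score-model shapes can be exercised the same way. A minimal sketch, with a hypothetical grader name, model, and prompt text, showing the plain-string branch of the input-content union:

    from openai.types.eval_retrieve_response import (
        TestingCriterionScoreModel,
        TestingCriterionScoreModelInput,
    )

    # Build a score_model grader whose single input message uses the str branch
    # of TestingCriterionScoreModelInputContent.
    criterion = TestingCriterionScoreModel(
        name="relevance",    # hypothetical grader name
        model="gpt-4o",      # hypothetical model
        type="score_model",
        input=[
            TestingCriterionScoreModelInput(
                role="user",
                content="Rate {{item.response}} for relevance on a 0-1 scale.",
            )
        ],
        range=[0.0, 1.0],
        pass_threshold=0.7,
    )
    print(criterion.input[0].content)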