in rbi/lib/openai/models/evals/run_retrieve_response.rbi [109:149]
def self.new(
  id:,
  created_at:,
  data_source:,
  error:,
  eval_id:,
  metadata:,
  model:,
  name:,
  per_model_usage:,
  per_testing_criteria_results:,
  report_url:,
  result_counts:,
  status:,
  object: :"eval.run"
); end
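
# Signature for the next method definition (which falls outside this excerpt's
# line range): it overrides the parent and returns the response's full
# attribute-to-type hash.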
sig do
  override
    .returns(
      {
        id: String,
        created_at: Integer,
        data_source: T.any(
          OpenAI::Models::Evals::CreateEvalJSONLRunDataSource,
          OpenAI::Models::Evals::CreateEvalCompletionsRunDataSource
        ),
        error: OpenAI::Models::Evals::EvalAPIError,
        eval_id: String,
        metadata: T.nilable(T::Hash[Symbol, String]),
        model: String,
        name: String,
        object: Symbol,
        per_model_usage: T::Array[OpenAI::Models::Evals::RunRetrieveResponse::PerModelUsage],
        per_testing_criteria_results: T::Array[OpenAI::Models::Evals::RunRetrieveResponse::PerTestingCriteriaResult],
        report_url: String,
        result_counts: OpenAI::Models::Evals::RunRetrieveResponse::ResultCounts,
        status: String
      }
    )
end
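
# Usage sketch (not from the source file): given a `run` value of
# OpenAI::Models::Evals::RunRetrieveResponse, the keys in the hash shape above
# are assumed to be exposed as attribute readers. The helper name
# `summarize_run` and the `run` argument are hypothetical.
def summarize_run(run)
  {
    status:     run.status,         # String per the signature above
    report_url: run.report_url,     # String
    results:    run.result_counts,  # ...::RunRetrieveResponse::ResultCounts
    metadata:   run.metadata || {}  # metadata is T.nilable(T::Hash[Symbol, String])
  }
end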