in rbi/lib/openai/resources/audio/transcriptions.rbi [25:95]
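# `create` takes the transcription request body as keyword arguments; `stream`
# defaults to false on this definition and `request_options` carries per-request
# configuration.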
def create(
  file:,
  model:,
  include: nil,
  language: nil,
  prompt: nil,
  response_format: nil,
  temperature: nil,
  timestamp_granularities: nil,
  stream: false,
  request_options: {}
); end
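# The Sorbet signature below types a streaming call path: `stream:` is declared as
# T.noreturn, so callers never pass it explicitly, and the return value is an
# OpenAI::Internal::Stream of transcription text delta/done events.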
sig do
  params(
    file: T.any(Pathname, StringIO),
    model: T.any(String, OpenAI::Models::AudioModel::OrSymbol),
    include: T::Array[OpenAI::Models::Audio::TranscriptionInclude::OrSymbol],
    language: String,
    prompt: String,
    response_format: OpenAI::Models::AudioResponseFormat::OrSymbol,
    temperature: Float,
    timestamp_granularities: T::Array[OpenAI::Models::Audio::TranscriptionCreateParams::TimestampGranularity::OrSymbol],
    stream: T.noreturn,
    request_options: T.nilable(T.any(OpenAI::RequestOptions, OpenAI::Internal::AnyHash))
  )
    .returns(
      OpenAI::Internal::Stream[
        T.any(
          OpenAI::Models::Audio::TranscriptionTextDeltaEvent,
          OpenAI::Models::Audio::TranscriptionTextDoneEvent
        )
      ]
    )
end
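For orientation, a minimal usage sketch against this interface, assuming the openai Ruby gem's standard `OpenAI::Client` entry point; the client construction, file path, and model string below are illustrative assumptions rather than part of the excerpt.

require "openai"
require "pathname"

# Illustrative client setup; the API key source is an assumption.
client = OpenAI::Client.new(api_key: ENV["OPENAI_API_KEY"])

# Non-streaming call matching the `create` definition above; with the default JSON
# response format the result exposes the transcribed `text`.
transcription = client.audio.transcriptions.create(
  file: Pathname.new("meeting.mp3"),
  model: "whisper-1"
)
puts transcription.text

# Consuming an OpenAI::Internal::Stream of the shape described by the sig above.
# The method that produces such a stream lies outside this excerpt, so the stream
# is taken as an argument here; iteration assumes the stream is enumerable.
def print_transcript(stream)
  stream.each do |event|
    case event
    when OpenAI::Models::Audio::TranscriptionTextDeltaEvent
      print event.delta
    when OpenAI::Models::Audio::TranscriptionTextDoneEvent
      puts
      puts event.text
    end
  end
end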