in HuggingChat-Mac/LocalSTT/AudioModelManager.swift [590:617]
func transcribeCurrentFile(path: String) async throws {
    // Load and convert the audio buffer in a limited scope so the decoded
    // buffer can be released as soon as the sample array has been built.
    let audioFileSamples = try await Task {
        try autoreleasepool {
            let audioFileBuffer = try AudioProcessor.loadAudio(fromPath: path)
            return AudioProcessor.convertBufferToArray(buffer: audioFileBuffer)
        }
    }.value

    // Transcribe the converted samples with the local speech-to-text pipeline.
    let transcription = try await transcribeAudioSamples(audioFileSamples)

    // Publish the segments and timing metrics on the main actor so the
    // UI-facing properties are only mutated from the main thread.
    await MainActor.run {
        currentText = ""
        guard let segments = transcription?.segments else {
            return
        }

        self.tokensPerSecond = transcription?.timings.tokensPerSecond ?? 0
        self.effectiveRealTimeFactor = transcription?.timings.realTimeFactor ?? 0
        self.effectiveSpeedFactor = transcription?.timings.speedFactor ?? 0
        self.currentEncodingLoops = Int(transcription?.timings.totalEncodingRuns ?? 0)
        self.firstTokenTime = transcription?.timings.firstTokenTime ?? 0
        self.pipelineStart = transcription?.timings.pipelineStart ?? 0
        self.currentLag = transcription?.timings.decodingLoop ?? 0
        self.confirmedSegments = segments
    }
}
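
For context, a minimal sketch of how this method might be invoked from a caller is shown below. The helper name `handlePickedFile`, the `audioModelManager` parameter, and the error handling are assumptions for illustration, not taken from the repository; only `AudioModelManager` and `transcribeCurrentFile(path:)` come from the file above.

import Foundation

// Hypothetical call site (assumed names): transcribeCurrentFile(path:) is
// async throws, so the call is wrapped in a Task and errors are handled here.
func handlePickedFile(_ pickedURL: URL, audioModelManager: AudioModelManager) {
    Task {
        do {
            try await audioModelManager.transcribeCurrentFile(path: pickedURL.path)
            // On success, confirmedSegments and the timing metrics have been
            // updated on the main actor and can drive the UI.
        } catch {
            print("Transcription failed: \(error)")
        }
    }
}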