async function startAudioContextRecording(stream: MediaStream, bufferSize: number): Promise<RecordingStream>

in client/src/util/audio.ts [112:156]

Records mono audio from the given MediaStream through a ScriptProcessorNode, accumulating raw samples until stop() is called or the capture track ends, then encodes them as WAV via audioBuffersToWAV and delivers the result to the stream's onended callback.
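The RecordingStream interface and the audioBuffersToWAV helper are defined elsewhere in audio.ts and are not shown in this excerpt. The sketch below reconstructs the shapes this function appears to assume; the names come from the source, but the exact signatures are guesses and may differ from the real definitions.

// Assumed shapes, inferred from how startAudioContextRecording uses them; the real
// definitions live elsewhere in client/src/util/audio.ts and may differ.
interface RecordingStream {
  // Assigned by the caller; invoked once with the encoded WAV when recording ends.
  onended: ((wav: unknown) => void) | null;
  // Stops capture, tears down the audio graph, and triggers onended.
  stop: () => void;
}

// Encodes accumulated mono PCM samples at the given sample rate into WAV data
// (the exact return type is not visible in this excerpt).
declare function audioBuffersToWAV(samples: Float32Array, sampleRate: number): unknown;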


async function startAudioContextRecording(stream: MediaStream, bufferSize: number): Promise<RecordingStream> {
  // Prefer the standard AudioContext and fall back to the webkit-prefixed variant (older Safari).
  const audioContextType = (window as any).AudioContext || (window as any).webkitAudioContext;
  if (!audioContextType) {
    throw new Error('AudioContext not supported');
  }
  const context = new audioContextType();
  const sampleRate = context.sampleRate;

  // Route the capture stream through a ScriptProcessorNode so raw samples can be read.
  const streamNode = context.createMediaStreamSource(stream);
  const processorNode = context.createScriptProcessor(bufferSize, 1, 1);

  // Accumulated mono PCM samples, grown on every audioprocess callback.
  let buffer: Float32Array = new Float32Array();

  // Tear down the audio graph and hand the recording, encoded as WAV, to the onended callback.
  const finish = () => {
    streamNode.disconnect();
    processorNode.disconnect();
    context.close();
    if (recordingStream.onended) {
      recordingStream.onended(audioBuffersToWAV(buffer, sampleRate));
    }
  };

  const recordingStream: RecordingStream = {
    onended: null,
    stop: () => {
      // Stop the capture tracks, then finalize the recording.
      for (const track of stream.getAudioTracks()) {
        track.stop();
      }
      finish();
    }
  };

  // Also finalize if the track ends on its own (device removed, permission revoked, etc.).
  stream.getAudioTracks()[0].onended = finish;

  // Append each incoming block of samples to the growing buffer.
  processorNode.addEventListener('audioprocess', (ev: AudioProcessingEvent) => {
    const samples = ev.inputBuffer.getChannelData(0);
    const tmp = new Float32Array(buffer.length + samples.length);
    tmp.set(buffer, 0);
    tmp.set(samples, buffer.length);
    buffer = tmp;
  });

  // The processor must be wired to a destination for audioprocess events to fire.
  streamNode.connect(processorNode);
  processorNode.connect(context.destination);

  return recordingStream;
}
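
A minimal usage sketch, assuming microphone permission is available and that the WAV payload handed to onended can be consumed directly; the recordFiveSeconds wrapper, the 4096 buffer size, and the console logging are illustrative and not part of audio.ts.

// Hypothetical caller, for illustration only; not part of client/src/util/audio.ts.
async function recordFiveSeconds(): Promise<void> {
  const stream = await navigator.mediaDevices.getUserMedia({ audio: true });
  const recording = await startAudioContextRecording(stream, 4096);

  // Fires once stop() is called (or the capture track ends) with the encoded WAV.
  recording.onended = (wav) => {
    console.log('recording finished', wav);
  };

  // Stop after five seconds; this stops the tracks, closes the AudioContext,
  // and emits the WAV through onended.
  setTimeout(() => recording.stop(), 5000);
}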