# to_video() — excerpt from utils/renderer.py

    def to_video(self, verts: th.Tensor, audio_file: str, video_output: str, fps: int = 30, batch_size: int = 30):
        """
        Render a sequence of face meshes and mux them with audio into an .mp4 file.

        :param verts: B x V x 3 tensor containing a batch of face vertex positions to be rendered
        :param audio_file: filename of the audio input file
        :param video_output: filename of the output video file (".mp4" is appended if missing)
        :param fps: frame rate of output video
        :param batch_size: number of frames to render simultaneously in one batch
        """
        if not video_output.endswith('.mp4'):
            video_output = video_output + '.mp4'

        # Render in batches to bound GPU memory, then assemble all frames on the CPU.
        images = th.cat([self.render(v).cpu() for v in th.split(verts, batch_size)], dim=0)
        # Keep RGB only (drop alpha if present); clamp before the uint8 cast so
        # values slightly outside [0, 1] don't wrap around on conversion.
        images = (255 * images[:, :, :, :3]).clamp(0, 255).contiguous().numpy()
        images = images.astype(np.uint8)

        # Derive the frame size from the rendered frames instead of hard-coding it;
        # frames are B x H x W x 3 and ffmpeg expects "<width>x<height>".
        height, width = images.shape[1], images.shape[2]
        video_stream = ffmpeg.input("pipe:", format="rawvideo", pix_fmt="rgb24", s=f"{width}x{height}", r=fps)
        audio_stream = ffmpeg.input(filename=audio_file)
        streams = [video_stream, audio_stream]
        output_args = {
            "format": "mp4",
            "pix_fmt": "yuv420p",
            "vcodec": "libx264",
            "movflags": "frag_keyframe+empty_moov+faststart"
        }
        proc = (
            ffmpeg
            .output(*streams, video_output, **output_args)
            .overwrite_output()
            .global_args("-loglevel", "fatal")
            .run_async(pipe_stdin=True, pipe_stdout=False)
        )

        # communicate() writes all raw frames to ffmpeg's stdin, closes the pipe,
        # and waits for the encoder to finish.
        proc.communicate(input=images.tobytes())