in neuralcompression/models/deep_video_compression.py [0:0]
def decompress(self, image1: Tensor, compressed_pframe: CompressedPFrame) -> Tensor:
    """
    Decompress motion fields and residual and compute next frame estimate.

    The compressed flow is decoded through the motion entropy bottleneck and
    motion decoder, applied to ``image1`` via dense warping, and refined by
    the motion-compensation network. The compressed residual is then decoded
    and added to the motion-compensated estimate.

    Args:
        image1: The base image for computing the next frame estimate.
        compressed_pframe: A compressed P-frame and metadata.

    Returns:
        An estimate of ``image2`` using ``image1`` and the compressed
        transition information, clamped to ``[0, 1]``.
    """
    # Latent spatial size is half the last recorded decomposition size
    # (the bottleneck operates one stride-2 level below the final decoder
    # stage).
    flow_latent_size = (
        compressed_pframe.flow_decomp_sizes[-1][-2] // 2,
        compressed_pframe.flow_decomp_sizes[-1][-1] // 2,
    )
    flow_latent = self.motion_entropy_bottleneck.decompress(
        compressed_pframe.compressed_flow, flow_latent_size
    )
    flow = self.motion_decoder(flow_latent, compressed_pframe.flow_decomp_sizes)

    # apply optical flow fields (dense_image_warp expects channels-last flow)
    image2_est = ncF.dense_image_warp(image1, flow.permute(0, 2, 3, 1))
    # compensate for optical flow errors
    image2_est = self.motion_compensation(image1, image2_est, flow)

    # decode residual
    residual_latent_size = (
        compressed_pframe.residual_decomp_sizes[-1][-2] // 2,
        compressed_pframe.residual_decomp_sizes[-1][-1] // 2,
    )
    # BUG FIX: the residual bytes must be decoded with the *residual*
    # entropy bottleneck, not the motion one — each bottleneck's learned
    # CDF tables match only the latent distribution it was trained on,
    # so decoding with the wrong model corrupts the residual latent.
    residual_latent = self.residual_entropy_bottleneck.decompress(
        compressed_pframe.compressed_residual, residual_latent_size
    )
    residual = self.residual_decoder(
        residual_latent, compressed_pframe.residual_decomp_sizes
    )

    # Clamp in place is safe: (image2_est + residual) is a fresh tensor.
    return (image2_est + residual).clamp_(0, 1)