def forward()

in pytorchvideo/layers/accelerator/mobile_cpu/conv_helper.py [0:0]


    def forward(self, x):
        """
        Use three conv2d to emulate conv3d.
        Args:
           x (torch.Tensor): 5D tensor of (B, C, T, H, W)
        """
        t, h, w = self._thw_shape
        out_tensor_list = []
        if (
            t == 1
        ):  # Degenerated to simple conv2d, but make sure output still has T dimension
            return self._conv2d_2(x[:, :, 0]).unsqueeze(2)
        elif t == 2:
            # out_tensor_list[0]: conv2d_1_1_0, conv2d_1_1_1 and conv2d_1_1_4 are
            # applied to zero padding.
            cur_tensor = (
                self._add_funcs[0]
                .add(self._conv2d_2(x[:, :, 0]), self._conv2d_3(x[:, :, 1]))
                .unsqueeze(2)
            )
            out_tensor_list.append(cur_tensor)
            # out_tensor_list[1]: conv2d_1_1_0, conv2d_1_1_3 and conv2d_1_1_4 are
            # applied to zero padding.
            cur_tensor = (
                self._add_funcs[1]
                .add(self._conv2d_1(x[:, :, 0]), self._conv2d_2(x[:, :, 1]))
                .unsqueeze(2)
            )
            out_tensor_list.append(cur_tensor)
        elif t == 3:
            # out_tensor_list[0]: conv2d_1_1_0, conv2d_1_1_1 are applied to zero padding.
            cur_tensor = (
                self._add_funcs[0]
                .add(
                    self._add_funcs[1].add(
                        self._conv2d_2(x[:, :, 0]), self._conv2d_3(x[:, :, 1])
                    ),
                    self._conv2d_4(x[:, :, 2]),
                )
                .unsqueeze(2)
            )
            out_tensor_list.append(cur_tensor)
            # out_tensor_list[1]: conv2d_1_1_0, conv2d_1_1_4 are applied to zero padding.
            cur_tensor = (
                self._add_funcs[2]
                .add(
                    self._add_funcs[3].add(
                        self._conv2d_1(x[:, :, 0]), self._conv2d_2(x[:, :, 1])
                    ),
                    self._conv2d_3(x[:, :, 2]),
                )
                .unsqueeze(2)
            )
            out_tensor_list.append(cur_tensor)
            # out_tensor_list[2]: conv2d_1_1_3, conv2d_1_1_4 are applied to zero padding.
            cur_tensor = (
                self._add_funcs[4]
                .add(
                    self._add_funcs[5].add(
                        self._conv2d_0(x[:, :, 0]), self._conv2d_1(x[:, :, 1])
                    ),
                    self._conv2d_2(x[:, :, 2]),
                )
                .unsqueeze(2)
            )
            out_tensor_list.append(cur_tensor)
        elif t == 4:
            # out_tensor_list[0]: conv2d_1_1_0, conv2d_1_1_1 are applied to zero padding.
            cur_tensor = (
                self._add_funcs[0]
                .add(
                    self._add_funcs[1].add(
                        self._conv2d_2(x[:, :, 0]), self._conv2d_3(x[:, :, 1])
                    ),
                    self._conv2d_4(x[:, :, 2]),
                )
                .unsqueeze(2)
            )
            out_tensor_list.append(cur_tensor)
            # out_tensor_list[1]: conv2d_1_1_0 is applied to zero padding.
            cur_tensor = (
                self._add_funcs[2]
                .add(
                    self._add_funcs[3].add(
                        self._add_funcs[4].add(
                            self._conv2d_1(x[:, :, 0]),
                            self._conv2d_2(x[:, :, 1]),
                        ),
                        self._conv2d_3(x[:, :, 2]),
                    ),
                    self._conv2d_4(x[:, :, 3]),
                )
                .unsqueeze(2)
            )
            out_tensor_list.append(cur_tensor)
            # out_tensor_list[2]: conv2d_1_1_4 is applied to zero padding.
            cur_tensor = (
                self._add_funcs[5]
                .add(
                    self._add_funcs[6].add(
                        self._add_funcs[7].add(
                            self._conv2d_0(x[:, :, 0]),
                            self._conv2d_1(x[:, :, 1]),
                        ),
                        self._conv2d_2(x[:, :, 2]),
                    ),
                    self._conv2d_3(x[:, :, 3]),
                )
                .unsqueeze(2)
            )
            out_tensor_list.append(cur_tensor)
            # out_tensor_list[3]: conv2d_1_1_3, conv2d_1_1_4 are applied to zero padding.
            cur_tensor = (
                self._add_funcs[8]
                .add(
                    self._add_funcs[9].add(
                        self._conv2d_0(x[:, :, 1]), self._conv2d_1(x[:, :, 2])
                    ),
                    self._conv2d_2(x[:, :, 3]),
                )
                .unsqueeze(2)
            )
            out_tensor_list.append(cur_tensor)
        else:  # t >= 5
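            # General case: the first two and last two output frames are
            # boundary frames that miss part of their temporal context, while
            # the loop further below handles the interior frames.
            # add_func_idx_base tracks how many add modules from
            # self._add_funcs have been consumed so far, so that every
            # elementwise add uses its own dedicated module.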
            # out_tensor_list[0]: conv2d_1_1_0, conv2d_1_1_1 are applied to zero padding.
            add_func_idx_base = 0
            cur_tensor = (
                self._add_funcs[add_func_idx_base]
                .add(
                    self._add_funcs[add_func_idx_base + 1].add(
                        self._conv2d_2(x[:, :, 0]), self._conv2d_3(x[:, :, 1])
                    ),
                    self._conv2d_4(x[:, :, 2]),
                )
                .unsqueeze(2)
            )
            out_tensor_list.append(cur_tensor)
            add_func_idx_base += 2
            # out_tensor_list[1]: conv2d_1_1_0 is applied to zero padding.
            cur_tensor = (
                self._add_funcs[add_func_idx_base]
                .add(
                    self._add_funcs[add_func_idx_base + 1].add(
                        self._add_funcs[add_func_idx_base + 2].add(
                            self._conv2d_1(x[:, :, 0]),
                            self._conv2d_2(x[:, :, 1]),
                        ),
                        self._conv2d_3(x[:, :, 2]),
                    ),
                    self._conv2d_4(x[:, :, 3]),
                )
                .unsqueeze(2)
            )
            out_tensor_list.append(cur_tensor)
            add_func_idx_base += 3
            # out_tensor_list[2:-2]: zero padding has no effect.
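            # Each iteration below produces output frame idx - 2 from input
            # frames idx - 4 .. idx, i.e. conv2d_k is applied to input frame
            # (idx - 4) + k for k = 0..4.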
            for idx in range(4, t):
                cur_tensor = (
                    self._add_funcs[add_func_idx_base]
                    .add(
                        self._add_funcs[add_func_idx_base + 1].add(
                            self._add_funcs[add_func_idx_base + 2].add(
                                self._add_funcs[add_func_idx_base + 3].add(
                                    self._conv2d_0(x[:, :, idx - 4]),
                                    self._conv2d_1(x[:, :, idx - 3]),
                                ),
                                self._conv2d_2(x[:, :, idx - 2]),
                            ),
                            self._conv2d_3(x[:, :, idx - 1]),
                        ),
                        self._conv2d_4(x[:, :, idx]),
                    )
                    .unsqueeze(2)
                )
                out_tensor_list.append(cur_tensor)
                add_func_idx_base += 4
            # out_tensor_list[-2]: conv2d_1_1_4 is applied to zero padding.
            cur_tensor = (
                self._add_funcs[add_func_idx_base]
                .add(
                    self._add_funcs[add_func_idx_base + 1].add(
                        self._add_funcs[add_func_idx_base + 2].add(
                            self._conv2d_0(x[:, :, -4]),
                            self._conv2d_1(x[:, :, -3]),
                        ),
                        self._conv2d_2(x[:, :, -2]),
                    ),
                    self._conv2d_3(x[:, :, -1]),
                )
                .unsqueeze(2)
            )
            out_tensor_list.append(cur_tensor)
            add_func_idx_base += 3
            # out_tensor_list[-1]: conv2d_1_1_3, conv2d_1_1_4 are applied to zero padding.
            cur_tensor = (
                self._add_funcs[add_func_idx_base]
                .add(
                    self._add_funcs[add_func_idx_base + 1].add(
                        self._conv2d_0(x[:, :, -3]),
                        self._conv2d_1(x[:, :, -2]),
                    ),
                    self._conv2d_2(x[:, :, -1]),
                )
                .unsqueeze(2)
            )
            out_tensor_list.append(cur_tensor)
        return self._cat_func.cat(out_tensor_list, 2)
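
The per-frame summation above can be checked numerically against a plain nn.Conv3d. The following is a minimal, self-contained sketch, not part of conv_helper.py: the helper name emulate_conv3d_t5, the tensor shapes, and the choice to attach the bias to the center tap are made up for illustration. It slices a (5, 1, 1) conv3d weight into five 1x1 conv2d weights and reproduces the same sum, skipping out-of-range frames exactly as the boundary branches of forward() do.

import torch
import torch.nn as nn
import torch.nn.functional as F


def emulate_conv3d_t5(x: torch.Tensor, conv3d: nn.Conv3d) -> torch.Tensor:
    """Emulate a (5, 1, 1) conv3d (temporal padding 2) with five 1x1 conv2ds."""
    b, c, t, h, w = x.shape
    # conv3d.weight has shape (out, in, 5, 1, 1); slice it into five
    # (out, in, 1, 1) conv2d weights, one per temporal tap.
    taps = [conv3d.weight[:, :, k] for k in range(5)]
    out_frames = []
    for i in range(t):
        # The center tap (k = 2) always sees a valid frame; attach the bias
        # to it so the bias is added exactly once per output frame.
        frame = F.conv2d(x[:, :, i], taps[2], bias=conv3d.bias)
        for k in (0, 1, 3, 4):
            j = i - 2 + k  # input frame index for tap k
            if 0 <= j < t:  # out-of-range frames are zero padding: no contribution
                frame = frame + F.conv2d(x[:, :, j], taps[k])
        out_frames.append(frame.unsqueeze(2))  # restore the T dimension
    return torch.cat(out_frames, dim=2)


if __name__ == "__main__":
    conv3d = nn.Conv3d(8, 16, kernel_size=(5, 1, 1), padding=(2, 0, 0))
    x = torch.randn(2, 8, 6, 4, 4)  # (B, C, T, H, W)
    print(torch.allclose(conv3d(x), emulate_conv3d_t5(x, conv3d), atol=1e-5))  # True

This only verifies the arithmetic of the decomposition; the library class builds the five conv2d modules up front (so the graph stays quantization- and export-friendly) rather than slicing weights at run time.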