shap_e/models/transmitter/channels_encoder.py [354:387]:
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
            )
        if self.cross_attention_dataset == "multiview":
            # Multiview branch: build the modules that turn patchified
            # RGB(-D) renders plus their camera poses into cross-attention
            # context tokens.
            self.image_size = image_size
            self.patch_size = patch_size
            self.pose_dropout = pose_dropout
            self.use_depth = use_depth
            self.max_depth = max_depth
            # Patch tokens per view; assumes image_size is divisible by
            # patch_size -- TODO confirm this is validated upstream.
            pos_ctx = (image_size // patch_size) ** 2
            self.register_parameter(
                "pos_emb",
                nn.Parameter(
                    # One learned positional embedding per patch token for
                    # every view in the inner batch (pos_ctx tokens/view).
                    torch.randn(
                        pos_ctx * self.inner_batch_size,
                        self.width,
                        device=self.device,
                        dtype=self.dtype,
                    )
                ),
            )
            # Non-overlapping patchification: stride == kernel == patch_size.
            # A 4th input channel is used when a depth channel accompanies RGB.
            self.patch_emb = nn.Conv2d(
                in_channels=3 if not use_depth else 4,
                out_channels=self.width,
                kernel_size=patch_size,
                stride=patch_size,
                device=self.device,
                dtype=self.dtype,
            )
            # Camera embedding MLP: input is the flattened pose, i.e. four
            # 3-vectors (origin + x/y/z axes) plus a scalar fov = 3*4 + 1.
            # NOTE(review): the 2*width output is presumably split into a
            # scale/shift pair at the use site -- verify against the caller.
            self.camera_emb = nn.Sequential(
                nn.Linear(
                    3 * 4 + 1, self.width, device=self.device, dtype=self.dtype
                ),  # input size is for origin+x+y+z+fov
                nn.GELU(),
                nn.Linear(self.width, 2 * self.width, device=self.device, dtype=self.dtype),
            )
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -



shap_e/models/transmitter/pc_encoder.py [227:260]:
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
        )
        if self.cross_attention_dataset == "multiview":
            # Multiview branch: build the modules that turn patchified
            # RGB(-D) renders plus their camera poses into cross-attention
            # context tokens.
            self.image_size = image_size
            self.patch_size = patch_size
            self.pose_dropout = pose_dropout
            self.use_depth = use_depth
            self.max_depth = max_depth
            # Patch tokens per view; assumes image_size is divisible by
            # patch_size -- TODO confirm this is validated upstream.
            pos_ctx = (image_size // patch_size) ** 2
            self.register_parameter(
                "pos_emb",
                nn.Parameter(
                    # One learned positional embedding per patch token for
                    # every view in the inner batch (pos_ctx tokens/view).
                    torch.randn(
                        pos_ctx * self.inner_batch_size,
                        self.width,
                        device=self.device,
                        dtype=self.dtype,
                    )
                ),
            )
            # Non-overlapping patchification: stride == kernel == patch_size.
            # A 4th input channel is used when a depth channel accompanies RGB.
            self.patch_emb = nn.Conv2d(
                in_channels=3 if not use_depth else 4,
                out_channels=self.width,
                kernel_size=patch_size,
                stride=patch_size,
                device=self.device,
                dtype=self.dtype,
            )
            # Camera embedding MLP: input is the flattened pose, i.e. four
            # 3-vectors (origin + x/y/z axes) plus a scalar fov = 3*4 + 1.
            # NOTE(review): the 2*width output is presumably split into a
            # scale/shift pair at the use site -- verify against the caller.
            self.camera_emb = nn.Sequential(
                nn.Linear(
                    3 * 4 + 1, self.width, device=self.device, dtype=self.dtype
                ),  # input size is for origin+x+y+z+fov
                nn.GELU(),
                nn.Linear(self.width, 2 * self.width, device=self.device, dtype=self.dtype),
            )
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -



