def forward()

in pytorch3d/renderer/points/pulsar/unified.py


    def forward(self, point_clouds, **kwargs) -> torch.Tensor:
        """
        Get the rendering of the provided `Pointclouds`.

        The number of point clouds in the `Pointclouds` object determines the
        number of resulting images. The number of provided cameras can be
        either one or equal to the number of point clouds (in the first case,
        the same camera is used for all clouds; in the latter, each point
        cloud is rendered with its corresponding camera).

        The following kwargs are supported from PyTorch3D (depending on the
        selected camera model, these potentially override camera parameters):
            radius_world (bool): use the radii provided in the raster_settings
              directly as radii in world space. Default: False.
            znear (Iterable[float]): near geometry cutoff. Required for
              OrthographicCameras and PerspectiveCameras.
            zfar (Iterable[float]): far geometry cutoff. Required for
              OrthographicCameras and PerspectiveCameras.
            R (torch.Tensor): [Bx3x3] camera rotation matrices.
            T (torch.Tensor): [Bx3] camera translation vectors.
            principal_point (torch.Tensor): [Bx2] camera intrinsic principal
              point offset vectors.
            focal_length (torch.Tensor): [Bx1] camera intrinsic focal lengths.
            aspect_ratio (Iterable[float]): camera aspect ratios.
            fov (Iterable[float]): camera FOVs.
            degrees (bool): whether FOVs are specified in degrees or
              radians.
            min_x (Iterable[float]): minimum x for the FoVOrthographicCameras.
            max_x (Iterable[float]): maximum x for the FoVOrthographicCameras.
            min_y (Iterable[float]): minimum y for the FoVOrthographicCameras.
            max_y (Iterable[float]): maximum y for the FoVOrthographicCameras.

        The following kwargs are supported from pulsar:
            gamma (float): The gamma value to use. This defines the transparency for
                differentiability (see the pulsar paper for details). Must be in
                [1e-5, 1.], with 1.0 being mostly transparent. This keyword
                argument is *required*!
            bg_col (torch.Tensor): The background color. Must be a tensor on the same
                device as the point clouds, with as many channels as features (no batch
                dimension - it is the same for all images in the batch).
                Default: 0.0 for all channels.
            percent_allowed_difference (float): a value in [0., 1.) with the maximum
                allowed difference in channel space. This is used to speed up the
                computation. Default: 0.01.
            max_n_hits (int): a hard limit on the number of sphere hits per ray.
                Default: max int.
            mode (int): render mode in {0, 1}. 0: render image; 1: render hit map.
        """
        orthogonal_projection: bool = self._conf_check(point_clouds, kwargs)
        # Get access to the inputs. We use the list accessors and process
        # the point clouds sequentially.
        position_list = point_clouds.points_list()
        features_list = point_clouds.features_list()
        # Result list.
        images = []
        for cloud_idx, (vert_pos, vert_col) in enumerate(
            zip(position_list, features_list)
        ):
            # Get extrinsics.
            cam_pos, cam_rot = self._extract_extrinsics(kwargs, cloud_idx)
            # Get intrinsics.
            (
                focal_length,
                sensor_width,
                principal_point_x,
                principal_point_y,
                znear,
                zfar,
            ) = self._extract_intrinsics(
                orthogonal_projection, kwargs, cloud_idx, cam_pos.device
            )
            # Assemble the flat camera parameter vector pulsar expects:
            # position, rotation, then intrinsics (focal length, sensor
            # width, principal point offsets).
            cam_params = torch.cat(
                (
                    cam_pos,
                    cam_rot.to(cam_pos.device),
                    torch.cat(
                        [
                            focal_length,
                            sensor_width,
                            principal_point_x,
                            principal_point_y,
                        ],
                    ),
                )
            )
            # Get the point radii (these can depend on the camera position).
            vert_rad = self._get_vert_rad(
                vert_pos,
                cam_pos,
                orthogonal_projection,
                focal_length,
                kwargs,
                cloud_idx,
            )
            # Extract the per-cloud gamma and clean the kwargs for passing on.
            gamma = kwargs["gamma"][cloud_idx]
            if "first_R_then_T" in kwargs.keys():
                raise ValueError("`first_R_then_T` is not supported in this interface.")
            otherargs = {
                argn: argv
                for argn, argv in kwargs.items()
                if argn
                not in [
                    "radius_world",
                    "gamma",
                    "znear",
                    "zfar",
                    "R",
                    "T",
                    "principal_point",
                    "focal_length",
                    "aspect_ratio",
                    "fov",
                    "degrees",
                    "min_x",
                    "max_x",
                    "min_y",
                    "max_y",
                ]
            }
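            # Any remaining kwargs (e.g. bg_col, percent_allowed_difference,
            # max_n_hits, mode) are forwarded to the pulsar renderer unchanged.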
            # Default the background color to zeros (black), with one entry
            # per feature channel.
            if "bg_col" not in otherargs:
                bg_col = torch.zeros(
                    vert_col.shape[1], device=cam_params.device, dtype=torch.float32
                )
                otherargs["bg_col"] = bg_col
            # Render, then flip vertically to match the PyTorch3D image
            # convention.
            images.append(
                self.renderer(
                    vert_pos=vert_pos,
                    vert_col=vert_col,
                    vert_rad=vert_rad,
                    cam_params=cam_params,
                    gamma=gamma,
                    max_depth=zfar,
                    min_depth=znear,
                    **otherargs,
                ).flip(dims=[0])
            )
        return torch.stack(images, dim=0)
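
For reference, a minimal usage sketch of this interface follows. It is an
illustration under assumptions, not part of the source: it assumes a PyTorch3D
version that exports `PulsarPointsRenderer` from `pytorch3d.renderer` and that
the renderer wraps a standard `PointsRasterizer`; exact constructor arguments
may differ between versions.

import torch
from pytorch3d.renderer import (
    PerspectiveCameras,
    PointsRasterizationSettings,
    PointsRasterizer,
    PulsarPointsRenderer,
)
from pytorch3d.structures import Pointclouds

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# One point cloud with 1000 random points and RGB features.
points = torch.rand(1, 1000, 3, device=device) * 2.0 - 1.0
features = torch.rand(1, 1000, 3, device=device)
point_clouds = Pointclouds(points=points, features=features)

# A single camera; with a batch of clouds, pass either one camera or one
# camera per cloud.
cameras = PerspectiveCameras(device=device)
raster_settings = PointsRasterizationSettings(
    image_size=256, radius=0.01, points_per_pixel=1
)
renderer = PulsarPointsRenderer(
    rasterizer=PointsRasterizer(cameras=cameras, raster_settings=raster_settings)
).to(device)

# `gamma` is required; `znear`/`zfar` are required for PerspectiveCameras.
# Each takes one value per point cloud in the batch.
images = renderer(
    point_clouds,
    gamma=(1e-4,),  # near the 1e-5 end of the range: mostly opaque blending
    znear=(0.1,),
    zfar=(10.0,),
)
print(images.shape)  # (1, 256, 256, 3): one image per point cloud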