# Excerpt: def save_forward_results(...) — from mmf/models/mesh_renderer.py [0:0]


    def save_forward_results(self, sample_list, xy_offset, z_grid, rendering_results):
        """Persist per-image forward-pass outputs to disk.

        For each image in the batch, exactly one of three save paths runs,
        selected by config flags (checked in this order):

        1. ``save_for_realestate10k_eval`` — input/output/target PNGs laid out
           for the SynSin RealEstate10K evaluation script.
        2. ``save_for_external_inpainting`` — source RGBA (alpha = visibility
           mask) and target PNG pairs for external inpainting training/eval.
        3. Otherwise — an ``.npz`` archive of the raw reconstruction tensors
           under ``config.forward_results_dir``.

        Args:
            sample_list: batch holding ``image_id`` (byte tensors),
                ``orig_img_0``/``orig_img_1`` and, for the
                ``synsin_habitat``/``replica`` datasets, depth tensors and
                masks. NOTE(review): images are assumed to be HWC floats in
                [0, 1] after clamping — confirm against the data pipeline.
            xy_offset: per-image xy offsets; dim 0 is the batch dimension.
            z_grid: per-image depth grid; dim 0 is the batch dimension.
            rendering_results: dict of tensors produced by the renderer
                (keys used: ``texture_image_rec``, ``rgba_out_rec_list``,
                ``depth_out_rec_list``, and conditionally ``rgb_1_out``,
                ``mesh_shape_out_list``, ``mesh_verts_world_coords``,
                ``rgb_1_inpaint``).
        """
        texture_image_rec = rendering_results["texture_image_rec"]
        rgba_0_rec, rgba_1_rec = rendering_results["rgba_out_rec_list"]
        # Clamp once here so every downstream save path sees valid [0, 1]
        # image intensities.
        rgba_0_rec = rgba_0_rec.clamp(min=0, max=1)
        rgba_1_rec = rgba_1_rec.clamp(min=0, max=1)
        depth_0_rec, depth_1_rec = rendering_results["depth_out_rec_list"]

        if self.config.render_mesh_shape_for_vis:
            mesh_shape_0, mesh_shape_1 = rendering_results["mesh_shape_out_list"]
            mesh_verts_world_coords = rendering_results["mesh_verts_world_coords"]

        for n_im in range(xy_offset.size(0)):
            image_id = byte_tensor_to_object(sample_list.image_id[n_im])

            if self.config.save_for_realestate10k_eval:
                self._save_realestate10k_eval_images(
                    sample_list, rendering_results, n_im, image_id
                )
                continue

            if self.config.save_for_external_inpainting:
                self._save_inpainting_images(sample_list, rgba_1_rec, n_im, image_id)
                continue

            # Default path: dump all reconstruction tensors to one .npz file.
            # Slashes in the image id are flattened so it is a single filename.
            save_file = os.path.join(
                self.config.forward_results_dir,
                '{}_outputs.npz'.format(image_id.replace("/", "-"))
            )
            save_dict = {
                "orig_img_0": sample_list.orig_img_0[n_im],
                "orig_img_1": sample_list.orig_img_1[n_im],
                "xy_offset": xy_offset[n_im],
                "z_grid": z_grid[n_im],
                "texture_image_rec": texture_image_rec[n_im],
                "rgba_0_rec": rgba_0_rec[n_im],
                "rgba_1_rec": rgba_1_rec[n_im],
                "depth_0_rec": depth_0_rec[n_im],
                "depth_1_rec": depth_1_rec[n_im],
            }
            if self.config.render_mesh_shape_for_vis:
                save_dict.update({
                    "mesh_shape_0": mesh_shape_0[n_im],
                    "mesh_shape_1": mesh_shape_1[n_im],
                    "mesh_verts_world_coords": mesh_verts_world_coords[n_im],
                })
            if sample_list.dataset_name in ["synsin_habitat", "replica"]:
                # These datasets carry ground-truth depth; save it alongside
                # the reconstructions for later comparison.
                save_dict.update({
                    "depth_0": sample_list.depth_0[n_im],
                    "depth_1": sample_list.depth_1[n_im],
                    "depth_mask_0": sample_list.depth_mask_0[n_im],
                    "depth_mask_1": sample_list.depth_mask_1[n_im],
                })
            if self.config.use_inpainting:
                rgb_1_inpaint = rendering_results["rgb_1_inpaint"]
                rgb_1_inpaint = rgb_1_inpaint.clamp(min=0, max=1)
                save_dict.update({"rgb_1_inpaint": rgb_1_inpaint[n_im]})

            save_dict = {k: v.detach().cpu().numpy() for k, v in save_dict.items()}
            np.savez(save_file, **save_dict)

    @staticmethod
    def _tensor_to_ubyte(img):
        """Convert a [0, 1] float tensor to a uint8 numpy image for saving."""
        return skimage.img_as_ubyte(img.detach().cpu().numpy())

    def _save_realestate10k_eval_images(
        self, sample_list, rendering_results, n_im, image_id
    ):
        """Save input/output/target PNGs for RealEstate10K evaluation.

        Layout matches what the SynSin evaluation script expects; see
        https://github.com/facebookresearch/synsin/blob/master/evaluation/evaluate_perceptualsim.py
        and https://github.com/facebookresearch/synsin/blob/master/REALESTATE.md
        """
        # One sub-directory per sequence (prefix of the image id).
        save_sub_dir = os.path.join(
            self.config.forward_results_dir, image_id.split("_")[0]
        )
        os.makedirs(save_sub_dir, exist_ok=True)

        im_output = self._tensor_to_ubyte(
            rendering_results["rgb_1_out"][n_im].clamp(min=0, max=1)
        )
        im_input = self._tensor_to_ubyte(
            sample_list.orig_img_0[n_im].clamp(min=0, max=1)
        )
        im_tgt = self._tensor_to_ubyte(
            sample_list.orig_img_1[n_im].clamp(min=0, max=1)
        )

        skimage.io.imsave(os.path.join(save_sub_dir, "output_image_.png"), im_output)
        skimage.io.imsave(os.path.join(save_sub_dir, "input_image_.png"), im_input)
        skimage.io.imsave(os.path.join(save_sub_dir, "tgt_image_.png"), im_tgt)

    def _save_inpainting_images(self, sample_list, rgba_1_rec, n_im, image_id):
        """Save source/target PNG pairs for external inpainting training/eval.

        The source image is written as RGBA where the alpha channel is a
        binary visibility mask derived from the rendered alpha.
        """
        im_src = rgba_1_rec[n_im]  # already clamped to [0, 1] by the caller
        im_src_alpha_mask = im_src[..., 3:4].ge(1e-4).float()
        im_src = torch.cat([im_src[..., :3], im_src_alpha_mask], dim=-1)
        im_tgt = sample_list.orig_img_1[n_im].clamp(min=0, max=1)

        base_id = image_id.split("_")[0]
        skimage.io.imsave(
            os.path.join(self.inpainting_src_dir, f"{base_id}.png"),
            self._tensor_to_ubyte(im_src),
        )
        skimage.io.imsave(
            os.path.join(self.inpainting_tgt_dir, f"{base_id}.png"),
            self._tensor_to_ubyte(im_tgt),
        )