src/plots.py [138:144]:
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
        start_index = 0
        for batch in img_samples.batches(inference_chunk_size):
            img_part, inference_dict_part = train_config.inference(batch, gradient=False, is_inference=True)
            if len(imgs) == 0:
                for j in range(len(img_part)):
                    imgs.append(torch.zeros((dim_h * dim_w, img_part[j].shape[-1]), device=train_config.device,
                                            dtype=torch.float32))
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -



src/plots.py [244:250]:
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
        start_index = 0
        for batch in img_samples.batches(inference_chunk_size):
            img_part, inference_dict_part = train_config.inference(batch, gradient=False, is_inference=True)
            if len(imgs) == 0:
                for j in range(len(img_part)):
                    imgs.append(torch.zeros((dim_h * dim_w, img_part[j].shape[-1]), device=train_config.device,
                                            dtype=torch.float32))
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
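
Both excerpts contain the same chunked-inference pattern: iterate over img_samples in chunks of inference_chunk_size, run train_config.inference with gradients disabled, and on the first chunk lazily allocate one flat (dim_h * dim_w, channels) float32 buffer per returned image tensor. A minimal sketch of how that shared block could be factored into a single helper is shown below; the helper name and its parameter list are hypothetical, and only the behavior visible in the excerpts is reproduced (the excerpts end before start_index is used, so that part is left as a comment).

- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
from typing import List

import torch


def run_chunked_inference(img_samples, train_config, inference_chunk_size: int,
                          dim_h: int, dim_w: int) -> List[torch.Tensor]:
    """Hypothetical helper covering the block duplicated at plots.py 138:144 and 244:250."""
    imgs: List[torch.Tensor] = []
    start_index = 0
    for batch in img_samples.batches(inference_chunk_size):
        # Forward pass only: gradients off, inference flags as in the excerpts.
        img_part, inference_dict_part = train_config.inference(
            batch, gradient=False, is_inference=True)
        if len(imgs) == 0:
            # Lazily allocate one flat (H*W, channels) buffer per output tensor,
            # reading the channel count from the first chunk's results.
            imgs = [torch.zeros((dim_h * dim_w, part.shape[-1]),
                                device=train_config.device, dtype=torch.float32)
                    for part in img_part]
        # ... the excerpts end here; copying each chunk into imgs is not shown above.
    return imgs
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -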



