in fairnr/renderer.py
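# NOTE: this excerpt assumes the module-level imports of fairnr/renderer.py,
# which are not shown here: os, numpy as np, torch, pathlib.Path, a module
# `logger`, torchvision's save_image, and fairnr's data_utils helpers
# (GPUTimer, save_point_cloud).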
def generate(self, models, sample, **kwargs):
    model = models[0]
    model.eval()
    logger.info("rendering starts. {}".format(model.text))
    output_path = self.output_dir
    image_names = []
    sample, step, frames = self.parse_sample(sample)
    # fix the rendering size: rescale entries 0/1 (height/width) to match
    # self.resolution, and scale entries 2/3 inversely so their product with
    # the rescaled height/width is preserved
    a = sample['size'][0, 0, 0] / self.resolution[0]
    b = sample['size'][0, 0, 1] / self.resolution[1]
    sample['size'][:, :, 0] /= a
    sample['size'][:, :, 1] /= b
    sample['size'][:, :, 2] *= a
    sample['size'][:, :, 3] *= b
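
    # render every shape in the batch, processing frames in chunks of self.beam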
    for shape in range(sample['shape'].size(0)):
        max_step = step + frames
        while step < max_step:
            next_step = min(step + self.beam, max_step)
            uv, inv_RT = zip(*[
                self.generate_rays(
                    k,
                    sample['intrinsics'][shape],
                    sample['size'][shape, 0],
                    self.test_poses[k] if self.test_poses is not None else None)
                for k in range(step, next_step)
            ])
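            # explicit test frame ids are only supported one frame at a time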
            if self.test_frameids is not None:
                assert next_step - step == 1
                ids = torch.tensor(self.test_frameids[step: next_step]).type_as(sample['id'])
            else:
                ids = sample['id'][shape:shape+1]
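
            # ground-truth colors: prefer full-resolution RGB when available,
            # and make sure the channel dimension comes last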
            real_images = sample['full_rgb'] if 'full_rgb' in sample else sample['colors']
            real_images = real_images.transpose(2, 3) if real_images.size(-1) != 3 else real_images
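
            # assemble a single-shape mini-batch covering frames [step, next_step)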
            _sample = {
                'id': ids,
                'colors': torch.cat([real_images[shape:shape+1] for _ in range(step, next_step)], 1),
                'intrinsics': sample['intrinsics'][shape:shape+1],
                'extrinsics': torch.stack(inv_RT, 0).unsqueeze(0),
                'uv': torch.stack(uv, 0).unsqueeze(0),
                'shape': sample['shape'][shape:shape+1],
                'view': torch.arange(
                    step, next_step,
                    device=sample['shape'].device).unsqueeze(0),
                'size': torch.cat([sample['size'][shape:shape+1] for _ in range(step, next_step)], 1),
                'step': step
            }
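
            # run the model on this chunk of frames and time the forward pass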
            with data_utils.GPUTimer() as timer:
                outs = model(**_sample)
            logger.info("rendering frame={}\ttotal time={:.4f}".format(step, timer.sum))
            for k in range(step, next_step):
                images = model.visualize(_sample, None, 0, k - step)
                image_name = "{:04d}".format(k)
                for key in images:
                    # each key encodes "<name>_<type>"; route the output by type
                    name, img_type = key.split('/')[0].split('_')
                    if img_type in self.output_type:
                        if name == 'coarse':
                            img_type = 'coarse-' + img_type
                        if name == 'target':
                            continue
                        prefix = os.path.join(output_path, img_type)
                        Path(prefix).mkdir(parents=True, exist_ok=True)
                        if img_type == 'point':
                            data_utils.save_point_cloud(
                                os.path.join(prefix, image_name + '.ply'),
                                images[key][:, :3].cpu().numpy(),
                                (images[key][:, 3:] * 255).cpu().int().numpy())
                        else:
                            # HWC -> CHW for 3-channel images; tile single-channel
                            # maps to three identical channels for save_image
                            image = images[key].permute(2, 0, 1) \
                                if images[key].dim() == 3 else torch.stack(3 * [images[key]], 0)
                            save_image(image, os.path.join(prefix, image_name + '.png'), format=None)
                            image_names.append(os.path.join(prefix, image_name + '.png'))
                # save pose matrix
                prefix = os.path.join(output_path, 'pose')
                Path(prefix).mkdir(parents=True, exist_ok=True)
                pose = self.test_poses[k] if self.test_poses is not None else inv_RT[k - step].cpu().numpy()
                np.savetxt(os.path.join(prefix, image_name + '.txt'), pose)
            step = next_step

    logger.info("done")
    return step, image_names
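
A minimal sketch of how this method might be driven, assuming `renderer` is an
instance of the surrounding class, `model` is a trained fairnr model, and
`test_dataloader` yields batches with the keys used above ('shape', 'size',
'intrinsics', 'colors', 'id', ...); fairnr's actual CLI entry point is not
shown here and may differ:

    import torch

    models = [model]                      # generate() only uses models[0]
    image_names = []
    with torch.no_grad():                 # generate() puts the model in eval mode
        for sample in test_dataloader:    # hypothetical loader of render batches
            step, names = renderer.generate(models, sample)
            image_names.extend(names)
    # image_names now lists every PNG written under renderer.output_dir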