# vae_helpers.py

import torch
import torch.nn.functional as F

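# const_max / const_min are not defined in this excerpt; the definitions below
# are a minimal assumed sketch (elementwise max/min against a scalar constant),
# consistent with how the two helpers are used in
# sample_from_discretized_mix_logistic.
def const_max(t, constant):
    return torch.max(t, torch.ones_like(t) * constant)


def const_min(t, constant):
    return torch.min(t, torch.ones_like(t) * constant)
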
def sample_from_discretized_mix_logistic(l, nr_mix):
    """Draw per-pixel RGB samples from a discretized mixture of logistics.

    l: channels-last parameter tensor of shape [B, H, W, nr_mix * 10].
    nr_mix: number of mixture components.
    Returns a tensor of shape [B, H, W, 3] with values clipped to [-1, 1].
    """
    ls = list(l.shape)       # [B, H, W, nr_mix * 10]
    xs = ls[:-1] + [3]       # output shape [B, H, W, 3]
    # unpack parameters: the first nr_mix channels are the mixture logits; the
    # remainder is reshaped to [B, H, W, 3, nr_mix * 3], holding the means,
    # log-scales and channel-coupling coefficients for each colour channel
    logit_probs = l[:, :, :, :nr_mix]
    l = torch.reshape(l[:, :, :, nr_mix:], xs + [nr_mix * 3])
    # sample the mixture indicator via the Gumbel-max trick: adding
    # -log(-log(u)) noise to the logits and taking the argmax is equivalent to
    # sampling from the softmax over mixture components
    eps = torch.empty(logit_probs.shape, device=l.device).uniform_(1e-5, 1. - 1e-5)
    amax = torch.argmax(logit_probs - torch.log(-torch.log(eps)), dim=3)
    sel = F.one_hot(amax, num_classes=nr_mix).float()
    sel = torch.reshape(sel, xs[:-1] + [1, nr_mix])   # one-hot mask [B, H, W, 1, nr_mix]
    # select the parameters of the chosen component with the one-hot mask;
    # log-scales are clamped from below at -7 for numerical stability
    means = (l[:, :, :, :, :nr_mix] * sel).sum(dim=4)
    log_scales = const_max((l[:, :, :, :, nr_mix:nr_mix * 2] * sel).sum(dim=4), -7.)
    coeffs = (torch.tanh(l[:, :, :, :, nr_mix * 2:nr_mix * 3]) * sel).sum(dim=4)
    # sample from the logistic by inverse-CDF transform: x = mean + scale * logit(u);
    # note we don't actually round to the nearest 8-bit value when sampling
    u = torch.empty(means.shape, device=means.device).uniform_(1e-5, 1. - 1e-5)
    x = means + torch.exp(log_scales) * (torch.log(u) - torch.log(1. - u))
    # apply the autoregressive channel coupling (green conditioned on red,
    # blue conditioned on red and green) and clip each channel to [-1, 1]
    x0 = const_min(const_max(x[:, :, :, 0], -1.), 1.)
    x1 = const_min(const_max(x[:, :, :, 1] + coeffs[:, :, :, 0] * x0, -1.), 1.)
    x2 = const_min(const_max(x[:, :, :, 2] + coeffs[:, :, :, 1] * x0 + coeffs[:, :, :, 2] * x1, -1.), 1.)
    return torch.cat([torch.reshape(x0, xs[:-1] + [1]),
                      torch.reshape(x1, xs[:-1] + [1]),
                      torch.reshape(x2, xs[:-1] + [1])], dim=3)
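

# Usage sketch (illustrative, not part of the original file): the function
# expects a channels-last parameter tensor with nr_mix * 10 channels per pixel
# (nr_mix mixture logits plus nr_mix means, log-scales and coupling
# coefficients for each of the 3 colour channels). The batch/spatial sizes and
# the random stand-in for the decoder output below are assumptions for
# demonstration only.
if __name__ == '__main__':
    nr_mix = 10
    px_z = torch.randn(4, 32, 32, nr_mix * 10)   # fake decoder output [B, H, W, nr_mix * 10]
    sample = sample_from_discretized_mix_logistic(px_z, nr_mix)
    print(sample.shape)                          # torch.Size([4, 32, 32, 3]), values in [-1, 1]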