in Experiments/PolicyNetworks.py
def __init__(self, input_size, hidden_size, output_size, args, batch_size=1):
# Initialize the parent torch.nn.Module so this class behaves as a proper module.
super(ContinuousEncoderNetwork, self).__init__()
self.args = args
self.input_size = input_size
self.hidden_size = hidden_size
self.output_size = output_size
self.num_layers = 5
self.batch_size = batch_size
# Define a bidirectional LSTM.
self.lstm = torch.nn.LSTM(input_size=self.input_size, hidden_size=self.hidden_size, num_layers=self.num_layers, bidirectional=True)
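# With the default batch_first=False, this LSTM expects input of shape (seq_len, batch, input_size)
# and, being bidirectional, returns outputs of shape (seq_len, batch, 2*hidden_size).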
# Because the LSTM is bidirectional, once we compute <outputs, hidden = self.lstm(input)>, we must concatenate:
# from the reverse LSTM, <outputs[0,:,hidden_size:]>, and from the forward LSTM, <outputs[-1,:,:hidden_size]>.
# (Refer - https://towardsdatascience.com/understanding-bidirectional-rnn-in-pytorch-5bd25a5dd66 )
# Because of this, any output layer must take inputs of size 2*hidden_size.
# Earlier output layers, kept commented out for reference:
# self.hidden_layer = torch.nn.Linear(2*self.hidden_size, self.hidden_size)
# self.output_layer = torch.nn.Linear(2*self.hidden_size, self.output_size)
# Softmax and LogSoftmax activation functions for latent z selection.
self.batch_softmax_layer = torch.nn.Softmax(dim=2)
self.batch_logsoftmax_layer = torch.nn.LogSoftmax(dim=2)
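# With dim=2, these normalize over the feature dimension of (seq_len, batch, features)
# tensors, i.e. independently at every timestep and batch element.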
# Define mean and variance output layers for the LSTM, plus their activations.
self.mean_output_layer = torch.nn.Linear(2*self.hidden_size, self.output_size)
self.variances_output_layer = torch.nn.Linear(2*self.hidden_size, self.output_size)
# Tanh squashes the predicted means to (-1, 1).
self.activation_layer = torch.nn.Tanh()
# Softplus keeps the predicted variances positive; the bias and factor below rescale them.
self.variance_activation_layer = torch.nn.Softplus()
self.variance_activation_bias = 0.
self.variance_factor = 0.01
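# The forward() method does not appear in this excerpt. The sketch below is a
# hypothetical illustration of how the layers defined above could be wired
# together; the concatenation pattern, shapes, and the exact variance formula
# are assumptions, not the original implementation.
def forward(self, input):
    # input: (seq_len, batch, input_size); outputs: (seq_len, batch, 2*hidden_size).
    outputs, hidden = self.lstm(input)
    # Concatenate the forward LSTM's last step with the reverse LSTM's first step,
    # per the comment above, giving a (batch, 2*hidden_size) sequence summary.
    concatenated_outputs = torch.cat([outputs[-1, :, :self.hidden_size], outputs[0, :, self.hidden_size:]], dim=-1)
    # Predicted mean of the latent z, squashed to (-1, 1) by Tanh.
    mean = self.activation_layer(self.mean_output_layer(concatenated_outputs))
    # Positive variance via Softplus, rescaled by the bias and factor fields
    # (one plausible reading of how those fields are meant to be used).
    variance = self.variance_factor * self.variance_activation_layer(self.variances_output_layer(concatenated_outputs) + self.variance_activation_bias)
    return mean, variance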