# Excerpt: __init__ of the model class, from neural/model.py

    def __init__(self,
                 meg_dim,
                 forcing_dims,
                 meg_init=40,
                 n_subjects=100,
                 max_length=301,
                 subject_dim=16,
                 conv_layers=2,
                 kernel=4,
                 stride=2,
                 conv_channels=256,
                 lstm_hidden=256,
                 lstm_layers=2):
        """Build the conv-encoder / optional-LSTM / conv-decoder stack.

        Args:
            meg_dim: number of MEG output channels to reconstruct.
            forcing_dims: mapping of forcing name -> feature dimension; the
                values are summed into the input width.
            meg_init: stored on the instance; not used in this constructor
                (presumably consumed by the forward pass — TODO confirm).
            n_subjects: vocabulary size of the subject embedding table.
            max_length: accepted but unused here (NOTE(review): likely used
                elsewhere in the class — confirm before removing).
            subject_dim: size of the per-subject embedding; 0 disables it.
            conv_layers: number of strided Conv1d (and ConvTranspose1d) layers.
            kernel, stride, conv_channels: conv hyper-parameters.
            lstm_hidden, lstm_layers: LSTM hyper-parameters; 0 layers disables
                the LSTM entirely.
        """
        super().__init__()
        self.forcing_dims = dict(forcing_dims)
        self.meg_init = meg_init

        # Input width: MEG channels + 1 auxiliary channel (purpose not visible
        # in this excerpt — TODO confirm) + subject embedding + all forcings.
        width = meg_dim + 1 + subject_dim + sum(forcing_dims.values())

        # Optional subject embedding; disabled when subject_dim == 0.
        self.subject_embedding = (
            nn.Embedding(n_subjects, subject_dim) if subject_dim else None)

        pad = kernel // 2
        modules = []
        for _ in range(conv_layers):
            modules.append(nn.Conv1d(width, conv_channels, kernel, stride, padding=pad))
            modules.append(nn.ReLU())
            width = conv_channels
        self.encoder = nn.Sequential(*modules)

        # Optional LSTM between encoder and decoder (expects time-major input
        # since batch_first is left at its default — TODO confirm at call site).
        if lstm_layers:
            self.lstm = nn.LSTM(input_size=width,
                                hidden_size=lstm_hidden,
                                num_layers=lstm_layers)
            width = lstm_hidden
        else:
            self.lstm = None

        self.conv_layers = conv_layers
        self.stride = stride
        self.kernel = kernel

        if conv_layers == 0:
            # No strided encoder: a 1x1 conv maps straight to MEG channels.
            self.decoder = nn.Conv1d(width, meg_dim, 1)
        else:
            # Mirror of the encoder; only the last layer maps to meg_dim and
            # the final layer carries no activation.
            modules = []
            for index in range(conv_layers):
                out_channels = meg_dim if index == conv_layers - 1 else conv_channels
                modules.append(
                    nn.ConvTranspose1d(width, out_channels, kernel, stride, padding=pad))
                if index < conv_layers - 1:
                    modules.append(nn.ReLU())
                width = out_channels
            self.decoder = nn.Sequential(*modules)