# Excerpt: Net.__init__ from source/nli.py

    def __init__(self, fname='',
                 idim=4*1024, odim=2, nhid=None,
                 dropout=0.0, gpu=0, activation='TANH'):
        """Build (or load from disk) the MLP classifier head.

        Args:
            fname: optional path to a saved model; if the file exists its
                ``mlp`` sub-module is reused instead of building a new one.
            idim: input feature dimension.
            odim: output dimension (e.g. number of classes).
            nhid: iterable of hidden-layer sizes; ``None`` or empty builds a
                single ``Linear(idim, odim)`` layer. Entries ``<= 0`` are
                skipped.
            dropout: dropout probability inserted before each ``Linear`` and
                after each activation (only when ``> 0``).
            gpu: device flag; the model is moved to CUDA when ``>= 0``.
            activation: ``'TANH'`` or ``'RELU'`` hidden-layer non-linearity.

        Raises:
            ValueError: if ``activation`` is not one of the supported names.
        """
        super(Net, self).__init__()
        self.gpu = gpu
        if os.path.isfile(fname):
            # Reuse the MLP from a previously saved model.
            # BUG FIX: original used '%s'.format(fname), which never
            # interpolated the filename into the message.
            print(' - loading mlp from {}'.format(fname))
            loaded = torch.load(fname)
            self.mlp = loaded.mlp
        else:
            # BUG FIX: the declared default nhid=None crashed on len(None);
            # treat None exactly like "no hidden layers".
            nhid = nhid or []
            modules = []
            if len(nhid) > 0:
                # Print the architecture summary incrementally as layers are
                # added (only in this branch — the no-hidden-layer branch
                # prints its own complete line below; previously the prefix
                # was printed unconditionally and duplicated there).
                print(' - mlp {:d}'.format(idim), end='')
                if dropout > 0:
                    modules.append(nn.Dropout(p=dropout))
                nprev = idim
                for nh in nhid:
                    if nh > 0:
                        modules.append(nn.Linear(nprev, nh))
                        nprev = nh
                        if activation == 'TANH':
                            modules.append(nn.Tanh())
                            print('-{:d}t'.format(nh), end='')
                        elif activation == 'RELU':
                            modules.append(nn.ReLU())
                            print('-{:d}r'.format(nh), end='')
                        else:
                            # BUG FIX: original string lacked the f-prefix and
                            # printed the literal text '{activation}'.
                            raise ValueError(
                                'Unrecognised activation {}'.format(activation))
                        if dropout > 0:
                            modules.append(nn.Dropout(p=dropout))
                modules.append(nn.Linear(nprev, odim))
                print('-{:d}, dropout={:.1f}'.format(odim, dropout))
            else:
                modules.append(nn.Linear(idim, odim))
                print(' - mlp {:d}-{:d}'.format(idim, odim))
            self.mlp = nn.Sequential(*modules)

        if self.gpu >= 0:
            self.mlp = self.mlp.cuda()