in LearningMachine.py [0:0]
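# Imports this excerpt relies on. The stdlib/torch ones are certain; the
# repo-local module paths below are assumptions and may differ in the project:
import logging

import torch.nn as nn

# from Model import Model                                    # assumed repo-local path
# from settings import ProblemTypes                          # assumed repo-local path
# from metrics.Evaluator import Evaluator                    # assumed repo-local path
# from utils.common_utils import transfer_to_gpu, get_trainable_param_num  # assumed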
def __init__(self, phase, conf, problem, vocab_info=None, initialize=True, use_gpu=False, **kwargs):
    if initialize is True:
        assert vocab_info is not None
        self.model = Model(conf, problem, vocab_info, use_gpu)
        if use_gpu is True:
            self.model = nn.DataParallel(self.model)
            self.model = transfer_to_gpu(self.model)
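        # NOTE: nn.DataParallel keeps the wrapped network under `.module`, which
        # is why the device check below branches on isinstance(self.model, nn.DataParallel).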
        # Determine which device the embedding matrix weights live on.
        emb_weight_device = list(self.model.module.layers.embedding.embeddings.values())[0].weight.device.type \
            if isinstance(self.model, nn.DataParallel) \
            else list(self.model.layers.embedding.embeddings.values())[0].weight.device.type
        device = 'GPU' if 'cuda' in emb_weight_device else 'CPU'
        logging.info(
            "The embedding matrix is on %s now; you can modify the weight_on_gpu parameter to change the device of the embedding weights." % device)
        logging.info("="*100 + '\n' + "*"*15 + "Model Architecture" + "*"*15)
        logging.info(self.model)
        #logging.info("Total parameters: %d; trainable parameters: %d" % (get_param_num(self.model), get_trainable_param_num(self.model)))
        logging.info("Total trainable parameters: %d" % (get_trainable_param_num(self.model)))
        logging.info("Model built!")
    else:
        self.model = None

    self.conf = conf
    self.problem = problem
    self.phase = phase
    self.use_gpu = use_gpu
    # If this is a 2-class classification problem, figure out which target index
    # is the real positive label.
    # CAUTION: this does not apply to multi-class classification.
    if phase != 'predict':
        if 'auc' in conf.metrics:
            if not hasattr(self.conf, 'pos_label') or self.conf.pos_label is None:
                if problem.output_dict.cell_num() == 2 and \
                        problem.output_dict.has_cell("0") and problem.output_dict.has_cell("1"):
                    # The label set is exactly {"0", "1"}: treat "1" as the positive class.
                    self.conf.pos_label = problem.output_dict.id("1")
                    logging.debug("Positive label (target index): %d" % self.conf.pos_label)
                else:
                    # No sensible default exists here, so demand an explicit setting.
                    raise Exception('Please configure the positive label for the auc metric at inputs/positive_label in the configuration file')
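                    # The knob named in the message above lives in the JSON conf;
                    # a hedged sketch of its shape (exact schema per the repo docs):
                    #   "inputs": { ..., "positive_label": "1", ... }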
            else:
                self.conf.pos_label = problem.output_dict.id(self.conf.pos_label)
        else:
            self.conf.pos_label = 1  # arbitrary placeholder; pos_label only matters for the auc metric
        self.metrics = conf.metrics
        if ProblemTypes[self.problem.problem_type] == ProblemTypes.classification \
                or ProblemTypes[self.problem.problem_type] == ProblemTypes.sequence_tagging:
            self.evaluator = Evaluator(metrics=self.metrics, pos_label=self.conf.pos_label, tagging_scheme=problem.tagging_scheme, label_indices=self.problem.output_dict.cell_id_map)
        elif ProblemTypes[self.problem.problem_type] == ProblemTypes.regression:
            self.evaluator = Evaluator(metrics=self.metrics, pos_label=self.conf.pos_label, tagging_scheme=problem.tagging_scheme, label_indices=None)
        elif ProblemTypes[self.problem.problem_type] == ProblemTypes.mrc:
            # Normalize metric names so every entry carries the 'mrc_' prefix
            # the MRC evaluator expects.
            curr_mrc_metric = []
            for single_mrc_metric in self.metrics:
                if 'mrc' in single_mrc_metric.lower():
                    curr_mrc_metric.append(single_mrc_metric.lower())
                else:
                    curr_mrc_metric.append('mrc_' + single_mrc_metric.lower())
            self.evaluator = Evaluator(metrics=curr_mrc_metric, pos_label=self.conf.pos_label, tagging_scheme=problem.tagging_scheme, label_indices=None)
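            # e.g. the loop above turns metrics ['F1', 'mrc_em'] into
            # ['mrc_f1', 'mrc_em'], so the Evaluator only sees lowercase
            # 'mrc_'-prefixed names.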
    self.best_test_result = "(No best test result yet)"
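
# Usage sketch (illustrative; not part of the original file). Assuming `conf`,
# `problem` and `vocab_info` are produced by the surrounding pipeline, and that
# the enclosing class is LearningMachine as the filename suggests:
#
#   lm = LearningMachine('train', conf, problem, vocab_info=vocab_info,
#                        initialize=True, use_gpu=torch.cuda.is_available())
#
# The evaluator/pos_label setup above is skipped when phase == 'predict'; a
# predict-phase caller would typically pass initialize=False and load saved
# weights instead (an assumption about the surrounding pipeline).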