mdr/qa/qa_trainer.py [302:329]:
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
                sp_scores = outputs["sp_score"]
                sp_scores = sp_scores.float().masked_fill(batch_to_feed["sent_offsets"].eq(0), float("-inf")).type_as(sp_scores)
                batch_sp_scores = sp_scores.sigmoid()
                # ans_type_predicted = torch.argmax(outputs["ans_type_logits"], dim=1).view(-1).tolist()
                outs = [outputs["start_logits"], outputs["end_logits"]]
            for qid, label, score in zip(batch_qids, batch_labels, scores):
                id2result[qid].append((label, score))

            # answer prediction
            span_scores = outs[0][:, :, None] + outs[1][:, None]
            max_seq_len = span_scores.size(1)
            span_mask = np.tril(np.triu(np.ones((max_seq_len, max_seq_len)), 0), args.max_ans_len)
            span_mask = span_scores.data.new(max_seq_len, max_seq_len).copy_(torch.from_numpy(span_mask))
            span_scores_masked = span_scores.float().masked_fill((1 - span_mask[None].expand_as(span_scores)).bool(), -1e10).type_as(span_scores)
            start_position = span_scores_masked.max(dim=2)[0].max(dim=1)[1]
            end_position = span_scores_masked.max(dim=2)[1].gather(
                1, start_position.unsqueeze(1)).squeeze(1)
            answer_scores = span_scores_masked.max(dim=2)[0].max(dim=1)[0].tolist()
            para_offset = batch['para_offsets']
            start_position_ = list(
                np.array(start_position.tolist()) - np.array(para_offset))
            end_position_ = list(
                np.array(end_position.tolist()) - np.array(para_offset))

            for idx, qid in enumerate(batch_qids):
                id2gold[qid] = batch["gold_answer"][idx]
                id2goldsp[qid] = batch["sp_gold"][idx]
                rank_score = scores[idx]
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -



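The excerpt above (duplicated below in scripts/train_qa.py) scores every candidate answer span as start_logit + end_logit, keeps only spans that run forward and are at most args.max_ans_len tokens long via a banded upper-triangular mask, and then picks the best start position followed by the best end position for that start. A minimal standalone sketch of that extraction step, with assumed tensor shapes and a hypothetical helper name (extract_best_span is not part of either file):

import numpy as np
import torch

def extract_best_span(start_logits, end_logits, max_ans_len):
    # start_logits, end_logits: (batch, seq_len)
    # span_scores[b, i, j] = start_logits[b, i] + end_logits[b, j]
    span_scores = start_logits[:, :, None] + end_logits[:, None]
    seq_len = span_scores.size(1)
    # Band mask: keep (i, j) only where i <= j <= i + max_ans_len
    band = np.tril(np.triu(np.ones((seq_len, seq_len)), 0), max_ans_len)
    band = torch.from_numpy(band).to(span_scores.device, span_scores.dtype)
    masked = span_scores.masked_fill(band[None].expand_as(span_scores) == 0, -1e10)
    # Best start = argmax over per-start maxima; best end = stored argmax for that start
    best_per_start, best_end_per_start = masked.max(dim=2)
    answer_score, start = best_per_start.max(dim=1)
    end = best_end_per_start.gather(1, start.unsqueeze(1)).squeeze(1)
    return start, end, answer_score

# Toy usage: batch of 2, sequence length 6, answers capped at 3 tokens
torch.manual_seed(0)
s, e = torch.randn(2, 6), torch.randn(2, 6)
print(extract_best_span(s, e, max_ans_len=3))
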
scripts/train_qa.py [235:264]:
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
                sp_scores = outputs["sp_score"]
                sp_scores = sp_scores.float().masked_fill(batch_to_feed["sent_offsets"].eq(0), float("-inf")).type_as(sp_scores)
                batch_sp_scores = sp_scores.sigmoid()

            # ans_type_predicted = torch.argmax(outputs["ans_type_logits"], dim=1).view(-1).tolist()
            outs = [outputs["start_logits"], outputs["end_logits"]]
        for qid, label, score in zip(batch_qids, batch_labels, scores):
            id2result[qid].append((label, score))

        # answer prediction
        span_scores = outs[0][:, :, None] + outs[1][:, None]
        max_seq_len = span_scores.size(1)
        span_mask = np.tril(np.triu(np.ones((max_seq_len, max_seq_len)), 0), args.max_ans_len)
        span_mask = span_scores.data.new(max_seq_len, max_seq_len).copy_(torch.from_numpy(span_mask))
        span_scores_masked = span_scores.float().masked_fill((1 - span_mask[None].expand_as(span_scores)).bool(), -1e10).type_as(span_scores)
        start_position = span_scores_masked.max(dim=2)[0].max(dim=1)[1]
        end_position = span_scores_masked.max(dim=2)[1].gather(
            1, start_position.unsqueeze(1)).squeeze(1)
        answer_scores = span_scores_masked.max(dim=2)[0].max(dim=1)[0].tolist()
        para_offset = batch['para_offsets']
        start_position_ = list(
            np.array(start_position.tolist()) - np.array(para_offset))
        end_position_ = list(
            np.array(end_position.tolist()) - np.array(para_offset))

        for idx, qid in enumerate(batch_qids):
            id2gold[qid] = batch["gold_answer"][idx]
            id2goldsp[qid] = batch["sp_gold"][idx]

            rank_score = scores[idx]
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
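
Both excerpts also begin with the same supporting-fact step: sentence logits at padded slots (where sent_offsets is 0) are filled with -inf before the sigmoid, so padding ends up with probability exactly 0. A minimal sketch with toy shapes and values (the tensors here are made up for illustration):

import torch

# sp_score: (batch, max_sents) supporting-fact logits; sent_offsets is 0 at padded slots
sp_score = torch.randn(2, 4)
sent_offsets = torch.tensor([[5, 12, 23, 0],
                             [7, 18,  0, 0]])
masked = sp_score.float().masked_fill(sent_offsets.eq(0), float("-inf"))
batch_sp_scores = masked.sigmoid()  # padded sentences -> probability 0.0
print(batch_sp_scores)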



