solver/self_play_qgen_vdst_oracle_vilbert.py [20:90]:
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
NUM_LOG_TEXT_SAMPLES = 5  # must be < len(valid_set)


class SelfPlaySolver(BaseSolver):
    def __init__(self, config, args, mode):
        super().__init__(config, args, mode)
        self.step = 0
        self.best_score = -1e10

    def load_dataloader(self, img_feat_readers, img_feat_readers_gt, tokenizer, splits):
        # Prepare self.train_set, self.valid_set, self.test_set
        dataroot = self.config['data']['dataroot']
        batch_size = self.config['data']['batch_size']
        # splits is passed in from load_data, e.g. ['train', 'valid'] or ['test']
        for split in splits:
            dataset = SelfPlayDataset(
                dataroot, 
                split, 
                img_feat_readers[split], 
                tokenizer, 
                padding_index=tokenizer.pad_id,
                image_features_reader_gt=img_feat_readers_gt[split],
                )
            # Cache the dataset's answer maps once (set from the first split constructed)
            if not hasattr(self, 'answer2id'):
                self.answer2id = dataset.answer2id
            if not hasattr(self, 'answer2token'):
                self.answer2token = dataset.answer2token
            # Set self.<split>_set to a torch.utils.data.DataLoader (eval splits use 4x the batch size)
            setattr(
                self,
                split+'_set',
                DataLoader(
                    dataset,
                    batch_size=batch_size if split == 'train' else 4*batch_size,
                    shuffle=(split=='train'),
                    drop_last=False,
                    collate_fn=partial(collate_fn, wrd_pad_id=tokenizer.pad_id),
                    num_workers=self.args.n_jobs,
                    pin_memory=self.args.pin_memory)
            )


    def load_data(self):
        self.verbose(['Loading data...'])
        config = self.config
        if config['data']['tokenizer'].lower() == 'bert':
            self.verbose(["Use Bert tokenizer"])
            tokenizer = BERT_Tokenizer.from_pretrained(
                'bert-base-uncased', do_lower_case=True)
        else:
            tokenizer = GW_Tokenizer(config['data']['vocab_path'])
        
        feat_path_qgen = config['data']['features_path']['qgen']
        feat_path_oracle = config['data']['features_path']['oracle']
        feat_path_gt = config['data']['features_path_gt']
        splits = ['train', 'valid'] if self.mode == 'train' else ['test']
        img_feat_readers = {
            split: {
                'qgen': image_features_reader(feat_path_qgen[split]), 
                'oracle': image_features_reader_vb(feat_path_oracle[split]), 
                }
            for split in splits}
        img_feat_readers_gt = {
            split: image_features_reader_gt(feat_path_gt[split]) 
            for split in splits}
        self.load_dataloader(img_feat_readers, img_feat_readers_gt, tokenizer, splits)
        self.tokenizer = tokenizer
        if self.mode == 'train':
            self.steps_per_epoch = len(self.train_set)
            self.max_step = self.steps_per_epoch * self.max_epoch

    def fetch_data(self, data):
        game, image_features_rcnn_qgen, bboxs_rcnn_qgen, image_features_rcnn_oracle, bboxs_rcnn_oracle, \
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
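
Note: both solver variants bind the word-level pad id into the collate function with
functools.partial before handing it to the DataLoader, so the loader only ever calls it
with the batch itself. The standalone sketch below shows the same binding pattern;
toy_collate_fn and its pad_sequence-based padding are illustrative assumptions, not the
project's actual collate_fn.

    from functools import partial

    import torch
    from torch.nn.utils.rnn import pad_sequence
    from torch.utils.data import DataLoader

    def toy_collate_fn(batch, wrd_pad_id=0):
        # batch is a list of variable-length 1-D LongTensors; pad to the longest one.
        return pad_sequence(batch, batch_first=True, padding_value=wrd_pad_id)

    toy_data = [torch.tensor([1, 2, 3]), torch.tensor([4, 5])]
    loader = DataLoader(
        toy_data,
        batch_size=2,
        # partial() freezes wrd_pad_id, mirroring partial(collate_fn, wrd_pad_id=tokenizer.pad_id)
        collate_fn=partial(toy_collate_fn, wrd_pad_id=0),
    )
    for padded in loader:
        print(padded)  # tensor([[1, 2, 3], [4, 5, 0]])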



solver/self_play_qgen_vdst_oracle_vilbert_guesser_vilbert.py [20:90]:
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
NUM_LOG_TEXT_SAMPLES = 5  # must be < len(valid_set)


class SelfPlaySolver(BaseSolver):
    def __init__(self, config, args, mode):
        super().__init__(config, args, mode)
        self.step = 0
        self.best_score = -1e10

    def load_dataloader(self, img_feat_readers, img_feat_readers_gt, tokenizer, splits):
        # Prepare self.train_set, self.valid_set, self.test_set
        dataroot = self.config['data']['dataroot']
        batch_size = self.config['data']['batch_size']
        # splits is passed in from load_data, e.g. ['train', 'valid'] or ['test']
        for split in splits:
            dataset = SelfPlayDataset(
                dataroot, 
                split, 
                img_feat_readers[split], 
                tokenizer, 
                padding_index=tokenizer.pad_id,
                image_features_reader_gt=img_feat_readers_gt[split],
                )
            # Cache the dataset's answer maps once (set from the first split constructed)
            if not hasattr(self, 'answer2id'):
                self.answer2id = dataset.answer2id
            if not hasattr(self, 'answer2token'):
                self.answer2token = dataset.answer2token
            # Set self.<split>_set to a torch.utils.data.DataLoader (eval splits use 4x the batch size)
            setattr(
                self,
                split+'_set',
                DataLoader(
                    dataset,
                    batch_size=batch_size if split == 'train' else 4*batch_size,
                    shuffle=(split=='train'),
                    drop_last=False,
                    collate_fn=partial(collate_fn, wrd_pad_id=tokenizer.pad_id),
                    num_workers=self.args.n_jobs,
                    pin_memory=self.args.pin_memory)
            )


    def load_data(self):
        self.verbose(['Loading data...'])
        config = self.config
        if config['data']['tokenizer'].lower() == 'bert':
            self.verbose(["Use Bert tokenizer"])
            tokenizer = BERT_Tokenizer.from_pretrained(
                'bert-base-uncased', do_lower_case=True)
        else:
            tokenizer = GW_Tokenizer(config['data']['vocab_path'])
        
        feat_path_qgen = config['data']['features_path']['qgen']
        feat_path_oracle = config['data']['features_path']['oracle']
        feat_path_gt = config['data']['features_path_gt']
        splits = ['train', 'valid'] if self.mode == 'train' else ['test']
        img_feat_readers = {
            split: {
                'qgen': image_features_reader(feat_path_qgen[split]), 
                'oracle': image_features_reader_vb(feat_path_oracle[split]), 
                }
            for split in splits}
        img_feat_readers_gt = {
            split: image_features_reader_gt(feat_path_gt[split]) 
            for split in splits}
        self.load_dataloader(img_feat_readers, img_feat_readers_gt, tokenizer, splits)
        self.tokenizer = tokenizer
        if self.mode == 'train':
            self.steps_per_epoch = len(self.train_set)
            self.max_step = self.steps_per_epoch * self.max_epoch

    def fetch_data(self, data):
        game, image_features_rcnn_qgen, bboxs_rcnn_qgen, image_features_rcnn_oracle, bboxs_rcnn_oracle, \
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
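
In load_data above, steps_per_epoch is len(self.train_set), i.e. the number of batches the
DataLoader yields per epoch (with drop_last=False that is the ceiling of dataset size over
batch size), and max_step scales it by max_epoch. A minimal standalone sketch of that
relationship, using made-up sizes rather than the project's data:

    import math

    import torch
    from torch.utils.data import DataLoader, TensorDataset

    dataset = TensorDataset(torch.arange(10))               # 10 samples (illustrative size)
    loader = DataLoader(dataset, batch_size=4, drop_last=False)

    steps_per_epoch = len(loader)                           # batches per epoch
    assert steps_per_epoch == math.ceil(len(dataset) / 4)   # 3: drop_last=False keeps the tail batch
    max_epoch = 5                                           # hypothetical epoch budget
    max_step = steps_per_epoch * max_epoch                  # 15 total self-play training steps
    print(steps_per_epoch, max_step)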



