def val_forward()

in archs/models.py [0:0]


    def val_forward(self, x):
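        # Validation forward: score every candidate (attribute, object) pair
        # for each image in the batch and return per-pair scores together
        # with L2-normalized pair features.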
        img = x[0]
        batch_size = img.shape[0]
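        # Buffers holding one score and one embedding-space feature per
        # candidate pair for every image in the batch.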
        pair_scores = torch.zeros(batch_size, len(self.val_attrs))
        pair_feats = torch.zeros(batch_size, len(self.val_attrs),
                                 self.args.emb_dim)
        pair_bs = len(self.val_attrs)

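        # Iterate over the candidate pairs in chunks of pair_bs; since
        # pair_bs is set to the full pair list above, this is effectively a
        # single iteration.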
        for pi in range(math.ceil(len(self.val_attrs) / pair_bs)):
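            # Compose the (attribute, object) embeddings for this chunk; the
            # result is stored on self.compose_g and aliased locally.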
            self.compose_g = self.compose(
                self.val_attrs[pi * pair_bs:(pi + 1) * pair_bs],
                self.val_objs[pi * pair_bs:(pi + 1) * pair_bs])
            compose_g = self.compose_g
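            # Tile each image once per pair in the chunk so the composition
            # network scores every (image, pair) combination in one batch.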
            expanded_im = img.unsqueeze(1).repeat(
                1, compose_g[0][0].shape[0],
                *tuple([1] * (img.dim() - 1))).view(-1, *img.shape[1:])
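            # Likewise repeat the composed pair embeddings across the batch.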
            expanded_compose_g = [[
                g.unsqueeze(0).repeat(batch_size, *tuple([1] * g.dim())).view(
                    -1, *g.shape[1:]) for g in layer_g
            ] for layer_g in compose_g]
            this_pair_scores, this_feat = self.comp_network(
                expanded_im, expanded_compose_g, return_feat=True)
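            # L2-normalize the returned features along the embedding
            # dimension (essentially
            # torch.nn.functional.normalize(this_feat, p=2, dim=-1)).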
            featnorm = torch.norm(this_feat, p=2, dim=-1)
            this_feat = this_feat.div(
                featnorm.unsqueeze(-1).expand_as(this_feat))

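            # Keep the first score column and reshape both outputs back to a
            # per-image layout: (batch_size, pairs_in_chunk[, emb_dim]).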
            this_pair_scores = this_pair_scores[:, :1].view(batch_size, -1)
            this_feat = this_feat.view(batch_size, -1, self.args.emb_dim)

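            # Write this chunk's scores and features into the full buffers.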
            pair_scores[:, pi * pair_bs:pi * pair_bs +
                        this_pair_scores.shape[1]] = this_pair_scores[:, :]
            pair_feats[:, pi * pair_bs:pi * pair_bs +
                       this_pair_scores.shape[1], :] = this_feat[:]

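        # Expose results as dictionaries keyed by (attribute, object) pair.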
        scores = {}
        feats = {}
        for i, (attr, obj) in enumerate(self.dset.pairs):
            scores[(attr, obj)] = pair_scores[:, i]
            feats[(attr, obj)] = pair_feats[:, i]

        return None, (scores, feats)
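
The least obvious step above is the expand-and-tile pattern (unsqueeze, then repeat, then view) that pairs every image in the batch with every composed (attribute, object) embedding before a single call to comp_network. Below is a minimal sketch of that pattern on toy tensors; the sizes and tensor names are illustrative assumptions, not values from the repository:

    import torch

    batch_size, n_pairs, feat_dim, emb_dim = 2, 3, 5, 4    # toy sizes (assumed)
    img = torch.randn(batch_size, feat_dim)                # stand-in image features
    pair_emb = torch.randn(n_pairs, emb_dim)               # stand-in composed pair embeddings

    # Repeat every image once per candidate pair: (B, D) -> (B * P, D),
    # laid out as image 0 for all pairs, then image 1 for all pairs, ...
    expanded_im = img.unsqueeze(1).repeat(
        1, n_pairs, *tuple([1] * (img.dim() - 1))).view(-1, *img.shape[1:])

    # Repeat the pair embeddings across the batch: (P, E) -> (B * P, E),
    # laid out as pairs 0..P-1 for image 0, then pairs 0..P-1 for image 1, ...
    expanded_pairs = pair_emb.unsqueeze(0).repeat(
        batch_size, *tuple([1] * pair_emb.dim())).view(-1, *pair_emb.shape[1:])

    assert expanded_im.shape == (batch_size * n_pairs, feat_dim)
    assert expanded_pairs.shape == (batch_size * n_pairs, emb_dim)
    # Row i of both tensors now describes (image i // n_pairs, pair i % n_pairs).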