def __init__()

in lib/utils/metrics.py [0:0]


    def __init__(self, model, split, video_idx_to_name, total_num_boxes):
        """Initialize metric-tracking state for a train/eval run.

        Args:
            model: the model under evaluation (stored for later use).
            split: dataset split identifier (presumably 'train'/'val'/'test'
                — confirm against callers).
            video_idx_to_name: mapping from video index to video name.
            total_num_boxes: expected total number of ground-truth boxes.
        """
        self.model = model
        self.split = split
        self.video_idx_to_name = video_idx_to_name
        self._total_num_boxes = total_num_boxes

        # Best-so-far top-1/top-5 error rates: lower is better, so seed
        # with +inf so any real measurement improves on them.
        self.best_top1 = float('inf')
        self.best_top5 = float('inf')

        # Best-so-far mAP: higher is better, so seed with -inf.
        self.best_map = float('-inf')
        self.lr = 0  # only used by train.
        self.num_test_clips = 1
        self.reset()

        if cfg.DATASET == "ava":
            # We load AVA annotations only once here, rather than loading
            # them every time we call AVA evaluation code.
            self.excluded_keys = read_exclusions(
                os.path.join(cfg.AVA.ANNOTATION_DIR,
                             "ava_val_excluded_timestamps_v2.1.csv"))

            self.categories, self.class_whitelist = read_labelmap(
                os.path.join(
                    cfg.AVA.ANNOTATION_DIR,
                    "ava_action_list_v2.1_for_activitynet_2018.pbtxt"))

            logger.info("CATEGORIES (%d):\n%s", len(self.categories),
                        pprint.pformat(self.categories, indent=2))

            gt_filename = os.path.join(cfg.AVA.ANNOTATION_DIR, "ava_val_v2.1.csv")
            self.full_groundtruth = read_csv(
                gt_filename,
                self.class_whitelist)

            self.mini_groundtruth = get_ava_mini_groundtruth(
                self.full_groundtruth)

            logger.info('%d (mini: %d) GT boxes loaded from %s.',
                        len(self.full_groundtruth[0]),
                        len(self.mini_groundtruth[0]),
                        gt_filename)
            # Fixed: the labels log previously re-read index [0] (boxes),
            # duplicating the line above. Index [1] holds the labels in the
            # groundtruth tuple returned by read_csv — confirm against its
            # definition.
            logger.info('%d (mini: %d) GT labels loaded from %s.',
                        len(self.full_groundtruth[1]),
                        len(self.mini_groundtruth[1]),
                        gt_filename)
        elif cfg.DATASET == 'charades':
            self.num_test_clips = cfg.CHARADES.NUM_TEST_CLIPS