def read_representations()

in datasets/epic_kitchens.py [0:0]


    def read_representations(self, frames, env, frame_format):
        """Read pre-extracted per-frame features from an LMDB environment.

        Adapted from
        https://github.com/fpv-iplab/rulstm/blob/96e38666fad7feafebbeeae94952dba24771e512/RULSTM/dataset.py#L10

        Args:
            frames: Iterable of integer frame ids to look up.
            env: Open LMDB environment; ``env.begin()`` yields a read
                transaction whose ``get(key)`` returns ``bytes`` or ``None``.
            frame_format: Format string mapping a frame id to its LMDB key,
                e.g. ``'frame_{:010d}'``.

        Returns:
            ``torch.Tensor`` of shape ``(num_frames, 1, 1, feat_dim)``,
            float32. Frames with no stored feature within the lookback
            window are zero-filled.

        Raises:
            AssertionError: If no feature is found for any requested frame.
        """
        features = []
        # One read transaction for all lookups; opening a transaction per
        # frame (as before) is needless per-iteration overhead.
        with env.begin() as txn:
            for frame_id in frames:
                # The exact frame may not have features stored. To avoid
                # looking at the future when training/testing (important
                # for anticipation), search only backwards from the
                # current position, up to 10 frames.
                dd = None
                search_radius = 0
                for search_radius in range(10):
                    key = frame_format.format(
                        frame_id - search_radius).strip().encode('utf-8')
                    dd = txn.get(key)
                    if dd is not None:
                        break
                if dd is None:
                    logging.error(
                        'Missing %s, Only specific frames are stored in lmdb :(',
                        frame_format.format(frame_id))
                    features.append(None)
                    continue
                if search_radius > 0 and self.warn_if_using_closeby_frame:
                    logging.warning('Missing %s, but used %d instead',
                                    frame_format.format(frame_id),
                                    frame_id - search_radius)
                # Stored value is a raw little-endian float32 buffer.
                features.append(np.frombuffer(dd, 'float32'))
        # For any frames we didn't find a feature, use a zero vector shaped
        # like any feature that was found.
        features_not_none = [el for el in features if el is not None]
        assert len(features_not_none) > 0, (
            f'No features found in {frame_format} - {frames}')
        feature_not_none = features_not_none[0]  # any
        features = [
            np.zeros_like(feature_not_none) if el is None else el
            for el in features
        ]
        features = np.array(features)
        # Add singleton H/W dimensions so the result looks like a
        # (T, 1, 1, C) video and the rest of the code just works.
        features = features[:, np.newaxis, np.newaxis, :]
        # Make it a torch Tensor to be consistent with the video path.
        return torch.as_tensor(features)