def collate_benchmarks()

in aepsych/benchmark/pathos_benchmark.py


    def collate_benchmarks(self, wait: bool = False) -> None:
        """Collect benchmark results from completed futures.

        Args:
            wait (bool, optional): If True, this method blocks and waits
                on all futures to complete. Defaults to False.
        """
        newfutures = []
        while self.futures:
            item = self.futures.pop()
            if wait or item.ready():
                result = item.get()
                if isinstance(result, BenchmarkLogger):
                    self.loggers.append(result)
            else:
                newfutures.append(item)

        self.futures = newfutures

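        # Merge the per-run loggers into a single combined logger.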
        if len(self.loggers) > 0:
            out_logger = BenchmarkLogger()
            for logger in self.loggers:
                out_logger._log.extend(logger._log)
            self.logger = out_logger
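
For reference, the ready()/get() polling pattern used above can be reproduced directly with a pathos pool. The sketch below is a standalone illustration, not part of pathos_benchmark.py; slow_square, the pool size, and the poll interval are invented for the example.

    import time

    from pathos.pools import ProcessPool

    def slow_square(x):
        time.sleep(0.5)
        return x * x

    pool = ProcessPool(nodes=2)
    # apipe submits work asynchronously and returns a future-like object
    # exposing ready() and get(), the same interface polled above.
    futures = [pool.apipe(slow_square, i) for i in range(4)]
    results = []

    # Drain-and-requeue loop, mirroring collate_benchmarks with wait=False:
    # finished futures are harvested, pending ones are kept for the next pass.
    while futures:
        pending = []
        for f in futures:
            if f.ready():
                results.append(f.get())
            else:
                pending.append(f)
        futures = pending
        time.sleep(0.1)

    print(sorted(results))  # [0, 1, 4, 9]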