in benchmark/benchmark/logs.py [0:0]
def __init__(self, clients, primaries, workers, faults=0):
    """Aggregate and parse the logs of all clients, primaries, and workers.

    Args:
        clients: List of client log contents, one string per client.
        primaries: List of primary log contents, one string per primary.
        workers: List of worker log contents, one string per worker.
        faults: Number of faulty nodes. When not an int, the committee
            size and per-primary worker count are marked unknown ('?').

    Raises:
        ParseError: If any client, primary, or worker log fails to parse.
    """
    inputs = [clients, primaries, workers]
    assert all(isinstance(x, list) for x in inputs)
    assert all(isinstance(x, str) for y in inputs for x in y)
    assert all(x for x in inputs)  # No log list may be empty.

    self.faults = faults
    if isinstance(faults, int):
        # `faults` is already an int in this branch; no conversion needed.
        self.committee_size = len(primaries) + faults
        self.workers = len(workers) // len(primaries)
    else:
        self.committee_size = '?'
        self.workers = '?'

    # Parse the clients logs.
    results = self._parse_parallel(self._parse_clients, clients, 'clients')
    self.size, self.rate, self.start, misses, self.sent_samples \
        = zip(*results)
    self.misses = sum(misses)

    # Parse the primaries logs.
    results = self._parse_parallel(self._parse_primaries, primaries, 'nodes')
    proposals, commits, self.configs, primary_ips = zip(*results)
    self.proposals = self._merge_results([x.items() for x in proposals])
    self.commits = self._merge_results([x.items() for x in commits])

    # Parse the workers logs.
    results = self._parse_parallel(self._parse_workers, workers, 'workers')
    sizes, self.received_samples, workers_ips = zip(*results)
    # Keep only the batch sizes of batches that were actually committed.
    self.sizes = {
        k: v for x in sizes for k, v in x.items() if k in self.commits
    }

    # Determine whether the primary and the workers are collocated.
    self.collocate = set(primary_ips) == set(workers_ips)

    # Check whether clients missed their target rate.
    if self.misses != 0:
        Print.warn(
            f'Clients missed their target rate {self.misses:,} time(s)'
        )

def _parse_parallel(self, parser, logs, role):
    """Run `parser` over every log in a process pool.

    Wraps the known parsing failures (malformed or truncated logs) in a
    ParseError naming the `role` ('clients', 'nodes', or 'workers'),
    chaining the original exception for debuggability.
    """
    try:
        with Pool() as p:
            return p.map(parser, logs)
    except (ValueError, IndexError, AttributeError) as e:
        raise ParseError(f'Failed to parse {role}\' logs: {e}') from e